code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
"""simple docstring""" lowerCamelCase_ : int = """ # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCamelCase_ : Dict = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCamelCase_ : Union[str, Any] = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
81
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in 
range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : 
Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) 
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: 
self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
0
from ...configuration_utils import PretrainedConfig from ...utils import logging A__ = logging.get_logger(__name__) A__ = { """facebook/timesformer""": """https://huggingface.co/facebook/timesformer/resolve/main/config.json""", } class __lowerCAmelCase ( lowerCamelCase__ ): __lowerCamelCase = '''timesformer''' def __init__( self , _snake_case=224 , _snake_case=16 , _snake_case=3 , _snake_case=8 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3072 , _snake_case="gelu" , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=1e-6 , _snake_case=True , _snake_case="divided_space_time" , _snake_case=0 , **_snake_case , ): """simple docstring""" super().__init__(**_snake_case ) _lowerCAmelCase = image_size _lowerCAmelCase = patch_size _lowerCAmelCase = num_channels _lowerCAmelCase = num_frames _lowerCAmelCase = hidden_size _lowerCAmelCase = num_hidden_layers _lowerCAmelCase = num_attention_heads _lowerCAmelCase = intermediate_size _lowerCAmelCase = hidden_act _lowerCAmelCase = hidden_dropout_prob _lowerCAmelCase = attention_probs_dropout_prob _lowerCAmelCase = initializer_range _lowerCAmelCase = layer_norm_eps _lowerCAmelCase = qkv_bias _lowerCAmelCase = attention_type _lowerCAmelCase = drop_path_rate
82
"""simple docstring""" from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : float )->float: return 0.0 def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = 20 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _UpperCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) 
plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
260
0
'''simple docstring''' import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin snake_case_ : Any = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowercase__ ( lowercase , unittest.TestCase ): lowercase__ = XLNetTokenizer lowercase__ = XLNetTokenizerFast lowercase__ = True lowercase__ = True def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase : List[Any] = XLNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase_ ( self : str ): '''simple docstring''' _UpperCamelCase : Optional[Any] = '<s>' _UpperCamelCase : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ ) def UpperCamelCase_ ( self : Optional[int] ): '''simple docstring''' _UpperCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<unk>' ) self.assertEqual(vocab_keys[1] ,'<s>' ) self.assertEqual(vocab_keys[-1] ,'<eod>' ) self.assertEqual(len(lowerCamelCase__ ) ,1006 ) def UpperCamelCase_ ( self : int ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size ,1000 ) def UpperCamelCase_ ( self : Any ): '''simple docstring''' _UpperCamelCase : Optional[Any] = XLNetTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ ) _UpperCamelCase : Dict = tokenizer.tokenize('This is a test' ) self.assertListEqual(lowerCamelCase__ ,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) 
,[285, 46, 10, 170, 382] ) _UpperCamelCase : Union[str, Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowerCamelCase__ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] ,) _UpperCamelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) self.assertListEqual(lowerCamelCase__ ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ) _UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ ) self.assertListEqual( lowerCamelCase__ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] ,) def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' _UpperCamelCase : Any = XLNetTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ) _UpperCamelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( lowerCamelCase__ ,[ SPIECE_UNDERLINE + '', 'i', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] ,) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['▁he', 'll', 'o'] ) def UpperCamelCase_ ( self : str ): '''simple docstring''' _UpperCamelCase : Optional[Any] = XLNetTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ) _UpperCamelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' 
) self.assertListEqual( lowerCamelCase__ ,[ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 'se', '.', ] ,) @slow def UpperCamelCase_ ( self : Optional[Any] ): '''simple docstring''' _UpperCamelCase : Optional[Any] = XLNetTokenizer.from_pretrained('xlnet-base-cased' ) _UpperCamelCase : List[Any] = tokenizer.encode('sequence builders' ,add_special_tokens=lowerCamelCase__ ) _UpperCamelCase : Tuple = tokenizer.encode('multi-sequence build' ,add_special_tokens=lowerCamelCase__ ) _UpperCamelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ) _UpperCamelCase : Any = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ,lowerCamelCase__ ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' # fmt: off _UpperCamelCase : Any = {'input_ids': [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 
422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCamelCase__ ,model_name='xlnet-base-cased' ,revision='c841166438c31ec7ca9a106dee7bb312b73ae511' ,)
83
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """camembert""" def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=3_0_5_2_2 , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=None , **__UpperCamelCase : Optional[Any] , )->str: super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = 
position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class _a ( lowerCAmelCase): """simple docstring""" @property def lowercase__ ( self : int )->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
260
0
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :str = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) lowerCAmelCase_ :int = get_activation("""gelu""" ) self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) ) self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :int = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] ) lowerCAmelCase_ :List[str] = get_activation("""gelu""" ) lowerCAmelCase_ :Any = get_activation("""gelu_10""" ) lowerCAmelCase_ :Tuple = torch_builtin(__A ) lowerCAmelCase_ :Optional[Any] = geluaa(__A ) lowerCAmelCase_ :Union[str, Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(__A ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def __lowerCAmelCase ( self ) -> Optional[Any]: get_activation("""gelu""" ) get_activation("""gelu_10""" ) get_activation("""gelu_fast""" ) get_activation("""gelu_new""" ) get_activation("""gelu_python""" ) get_activation("""gelu_pytorch_tanh""" ) get_activation("""linear""" ) get_activation("""mish""" ) get_activation("""quick_gelu""" ) get_activation("""relu""" ) get_activation("""sigmoid""" ) get_activation("""silu""" ) get_activation("""swish""" ) get_activation("""tanh""" ) with self.assertRaises(__A ): get_activation("""bogus""" ) with self.assertRaises(__A ): get_activation(__A ) def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :Any = get_activation("""gelu""" ) lowerCAmelCase_ :Optional[Any] = 1 lowerCAmelCase_ :int = get_activation("""gelu""" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__A ): lowerCAmelCase_ 
:str = acta.a
84
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[str] = { "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """poolformer""" def __init__( self : List[str] , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : str=1_6 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : int=4.0 , __UpperCamelCase : str=[2, 2, 6, 2] , __UpperCamelCase : Tuple=[6_4, 1_2_8, 3_2_0, 5_1_2] , __UpperCamelCase : int=[7, 3, 3, 3] , __UpperCamelCase : str=[4, 2, 2, 2] , __UpperCamelCase : Union[str, Any]=[2, 1, 1, 1] , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : str=0.0_2 , **__UpperCamelCase : List[Any] , )->Dict: _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = stride _UpperCAmelCase = padding _UpperCAmelCase = pool_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = mlp_ratio _UpperCAmelCase = depths _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_layer_scale _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = initializer_range super().__init__(**__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = version.parse("""1.11""") @property def lowercase__ ( self : Union[str, Any] )->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: 
'''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase__ ( self : Tuple )->float: return 2e-3
260
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _SCREAMING_SNAKE_CASE : Any = { "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"], "tokenization_roc_bert": ["RoCBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: pass try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : List[Any] = [ "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RoCBertForCausalLM", "RoCBertForMaskedLM", "RoCBertForMultipleChoice", "RoCBertForPreTraining", "RoCBertForQuestionAnswering", "RoCBertForSequenceClassification", "RoCBertForTokenClassification", "RoCBertLayer", "RoCBertModel", "RoCBertPreTrainedModel", "load_tf_weights_in_roc_bert", ] if TYPE_CHECKING: from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig from .tokenization_roc_bert import RoCBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: raise OptionalDependencyNotAvailable() try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roc_bert import ( ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, RoCBertForCausalLM, RoCBertForMaskedLM, RoCBertForMultipleChoice, RoCBertForPreTraining, RoCBertForQuestionAnswering, RoCBertForSequenceClassification, RoCBertForTokenClassification, RoCBertLayer, RoCBertModel, RoCBertPreTrainedModel, load_tf_weights_in_roc_bert, ) else: import sys _SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
"""Accelerate experiment-tracking example: fine-tune bert-base-cased on GLUE/MRPC."""
import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the train/validation dataloaders for GLUE MRPC.

    Args:
        accelerator: the `Accelerator`; used for `main_process_first` preprocessing
            and for padding decisions (TPU / mixed-precision).
        batch_size: per-device train batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Run the training loop, optionally logging metrics to all available trackers."""
    # For testing only: shrink the run when dataloaders are mocked.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()


def main():
    """Parse CLI flags and launch `training_function`."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
260
0
"""Collect doc-test CI artifacts and post a summary report to Slack."""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient

client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    """Parse a pytest summary line into (n_failed, n_passed, time_spent)."""
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first error line of its short traceback."""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    """Builds and posts the Slack report (main message + per-job replies)."""

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        # NOTE: "failures" holds the failure *count* at this point (set in __main__).
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        """Total wall-clock time formatted as XhYmZs."""
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        # Fix: read from self.doc_test_results, not the module-level global
        # (which only exists when the script is run as __main__).
        category_failures = {k: v["failed"] for k, v in self.doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic "tests could not run" message."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # Fix: `payload` is already a list of blocks; the original called
        # json.loads() on it, which raises TypeError.
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        """Post the main summary message and remember its thread timestamp."""
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the Slack blocks for one job's failure details."""
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        """Post one threaded reply per failing job category."""
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                # Avoid hitting Slack rate limits between replies.
                time.sleep(1)


def get_job_links():
    """Return {job name: html url} for all jobs of the current GitHub Actions run."""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    """Read every text file in the artifact directory `name` into {stem: content}."""
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    """Scan the working directory for downloaded artifact folders."""

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
86
"""Count simple paths through a grid with blocked cells, via backtracking DFS."""


def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count simple paths from (row, col) to the bottom-right cell of `grid`.

    Cells containing 1 are blocked; `visit` tracks cells on the current path so
    it is never revisited (backtracking removes them on the way out).

    Args:
        grid: rectangular matrix of 0 (open) / 1 (blocked) cells.
        row, col: current position.
        visit: set of (row, col) cells already on the current path.

    Returns:
        Number of distinct simple paths to the bottom-right corner.

    >>> grid = [[0, 0, 0, 0], [1, 1, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0]]
    >>> depth_first_search(grid, 0, 0, set())
    2
    """
    # Fix: the original lost the bindings for row_length/col_length/count,
    # leaving them unresolved at runtime.
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    # Backtrack so the cell can appear in other paths.
    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
260
0
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class DDPMSchedulerState:
    """Immutable scheduler state carried between functional calls."""

    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Denoising Diffusion Probabilistic Models (DDPM) scheduler, functional Flax variant."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        # This scheduler keeps all mutable values in an explicit state object.
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        """Build the initial scheduler state (full training timestep range)."""
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        # DDPM does not rescale the model input.
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        """Return a state restricted to `num_inference_steps` evenly spaced timesteps."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        """Posterior variance for timestep `t`, per the configured variance type."""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """Predict the previous (less noisy) sample x_{t-1} from model output at timestep t."""
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
87
"""Convert an original FLAVA checkpoint (plus DALL-E codebook) to the transformers format."""
import argparse
import os

import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    """Sum all parameter values, skipping encoder.embeddings (duplicated in the original)."""
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename original FLAVA state-dict keys to the transformers naming scheme."""
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    # Codebook weights live under the `image_codebook.` prefix in the HF model.
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak the original FLAVA weights into the transformers design and save them.

    Args:
        checkpoint_path: local path or URL of the original FLAVA checkpoint.
        codebook_path: path of the DALL-E codebook checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional path to a HF config.json; a default FlavaConfig otherwise.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    # Sanity check: total parameter mass must survive the key renaming.
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to roughly ``precision`` significant digits.

    Uses the Chudnovsky algorithm; each series term contributes about 14
    correct digits, so ``ceil(precision / 14)`` terms are summed.  The last
    digit of the quotient is dropped because it may be off due to rounding.

    Raises:
        TypeError: if ``precision`` is not an integer.
        ValueError: if ``precision`` is less than 1.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Decimal arithmetic must carry `precision` significant digits; without
    # this the default context (28 digits) silently truncates the result.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the final (possibly mis-rounded) digit.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
88
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( _SCREAMING_SNAKE_CASE : Features ): '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(_SCREAMING_SNAKE_CASE : FeatureType ) -> None: nonlocal batch_size if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : int , )->Union[str, Any]: super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) 
_UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCAmelCase = Parquet( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , hash=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Union[str, Any] )->Dict: # Build iterable dataset if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Dataset , __UpperCamelCase : Union[PathLike, BinaryIO] , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Tuple , )->Optional[int]: _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: _UpperCAmelCase = self._write(file_obj=__UpperCamelCase , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : int , __UpperCamelCase : BinaryIO , __UpperCamelCase : int , **__UpperCamelCase 
: int )->int: _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCamelCase ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase , **__UpperCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCamelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(__UpperCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCamelCase ) written += batch.nbytes writer.close() return written
260
0
'''simple docstring'''
__all__ = ["__lowerCamelCase"]


def __lowerCamelCase(hex_num: str) -> int:
    """Convert a hexadecimal string to an int whose decimal digits spell its binary form.

    e.g. "AC" (= 172 = 0b10101100) -> 10101100.  A leading "-" is preserved.

    Raises:
        ValueError: if the stripped input is empty or not valid hexadecimal.
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function')
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError('Invalid value was passed to the function')
    if int_num == 0:
        # Without this guard the loop below would leave bin_str empty and
        # int("") would raise for input "0".
        return 0
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(('-' + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
89
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = " " ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 0 for index, char in enumerate(_SCREAMING_SNAKE_CASE ): if char == separator: split_words.append(string[last_index:index] ) _UpperCAmelCase = index + 1 elif index + 1 == len(_SCREAMING_SNAKE_CASE ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
260
0
# Lint check over repository file paths: flags files containing uppercase
# letters, spaces, or hyphens, and files that sit outside any directory.
# Exits non-zero when any offending file is found.
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# NOTE(review): every assignment below targets `__A`, yet the code then reads
# `filepaths`, `upper_files`, `space_files`, `hyphen_files`, `nodir_files`
# and `bad_files`, none of which are ever bound.  The distinct variable names
# appear to have been collapsed during extraction; as written this raises
# NameError at the first `assert`.  Left byte-identical pending confirmation.
__A = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Paths containing at least one uppercase character.
__A = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f'''{len(upper_files)} files contain uppercase characters:''')
    print("\n".join(upper_files) + "\n")

# Paths containing a space.
__A = [file for file in filepaths if " " in file]
if space_files:
    print(f'''{len(space_files)} files contain space characters:''')
    print("\n".join(space_files) + "\n")

# Paths containing a hyphen.
__A = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f'''{len(hyphen_files)} files contain hyphen characters:''')
    print("\n".join(hyphen_files) + "\n")

# Paths with no os.sep, i.e. files at the repository root.
__A = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f'''{len(nodir_files)} files are not in a directory:''')
    print("\n".join(nodir_files) + "\n")

# Total offender count doubles as the process exit status.
__A = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
90
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = args.pruning_method _UpperCAmelCase = args.threshold _UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) _UpperCAmelCase = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) _UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) _UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "bias" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": _UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1 _UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE ) 
_UpperCAmelCase = s * (r - l) + l _UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: _UpperCAmelCase = os.path.join( os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'\nCreated folder {target_model_path}' ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A : Optional[int] = parser.parse_args() main(args)
260
0
"""simple docstring""" from __future__ import annotations import math def _A (__a , __a , __a , __a , __a ) -> int: """simple docstring""" if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if not scores: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , __a , __a , __a ) , minimax(depth + 1 , node_index * 2 + 1 , __a , __a , __a ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , __a , __a , __a ) , minimax(depth + 1 , node_index * 2 + 1 , __a , __a , __a ) , ) ) def _A () -> None: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] SCREAMING_SNAKE_CASE_ : Any = math.log(len(__a ) , 2 ) print(f'Optimal value : {minimax(0 , 0 , __a , __a , __a )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
91
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
0
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Evaluate the gamma function at ``num`` by numerical integration.

    Gamma(num) = integral from 0 to infinity of x**(num-1) * e**(-x) dx.

    Raises:
        ValueError: if ``num`` is not strictly positive.
    """
    if num <= 0:
        raise ValueError("math domain error")
    # quad returns (value, estimated_error); only the value is needed.
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    """Integrand x**(z-1) * e**(-x) of the gamma function."""
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
92
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase = np.zeros_like(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image _UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): _UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() _UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / "image_data" / "lena.jpg" __A : str = np.array(Image.open(lena_path)) # kernel to be applied __A : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Optional[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
260
0
'''simple docstring''' import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCAmelCase__ ( unittest.TestCase ): lowerCAmelCase_ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase_ : Any = hf_hub_download( repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowercase_ : Optional[Any] = VideoClassificationPipeline(model=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE , top_k=2 ) lowercase_ : Optional[Any] = [ example_video_filepath, '''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''', ] return video_classifier, examples def _snake_case ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" for example in examples: lowercase_ : Tuple = video_classifier(__SCREAMING_SNAKE_CASE ) self.assertEqual( __SCREAMING_SNAKE_CASE , [ {'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )}, {'''score''': ANY(__SCREAMING_SNAKE_CASE ), '''label''': ANY(__SCREAMING_SNAKE_CASE )}, ] , ) @require_torch def _snake_case ( self ): """simple docstring""" lowercase_ : Dict = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification''' lowercase_ : str = VideoMAEFeatureExtractor( size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} ) lowercase_ : List[Any] = pipeline( '''video-classification''' , model=__SCREAMING_SNAKE_CASE , 
feature_extractor=__SCREAMING_SNAKE_CASE , frame_sampling_rate=4 ) lowercase_ : List[Any] = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' ) lowercase_ : Optional[int] = video_classifier(__SCREAMING_SNAKE_CASE , top_k=2 ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , ) lowercase_ : Dict = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(__SCREAMING_SNAKE_CASE , decimals=4 ) , [ [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}], [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}], ] , ) @require_tf def _snake_case ( self ): """simple docstring""" pass
93
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """audio-spectrogram-transformer""" def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = patch_size _UpperCAmelCase = qkv_bias _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins
260
0
import os
import random
import sys

from . import cryptomath_module as cryptoMath  # noqa: N812
from . import rabin_miller as rabinMiller  # noqa: N812


def main() -> None:
    """Generate a 1024-bit RSA key pair and write it to rsa_*.txt files."""
    print('''Making key files...''')
    make_key_files('''rsa''', 1024)
    print('''Key files generation successful.''')


def generate_key(key_size: int):
    """Return ``((n, e), (n, d))`` — an RSA public/private key pair.

    ``p`` and ``q`` are random ``key_size``-bit primes, ``e`` is drawn until
    it is coprime with (p-1)(q-1), and ``d`` is e's modular inverse.
    """
    print('''Generating prime p...''')
    p = rabinMiller.generate_large_prime(key_size)
    print('''Generating prime q...''')
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print('''Generating e that is relatively prime to (p - 1) * (q - 1)...''')
    while True:
        # Keep drawing key_size-bit candidates until one is coprime with phi(n).
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print('''Calculating d that is mod inverse of e...''')
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``; refuse to overwrite."""
    if os.path.exists(f'''{name}_pubkey.txt''') or os.path.exists(f'''{name}_privkey.txt'''):
        print('''\nWARNING:''')
        print(
            f'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            '''Use a different name or delete these files and re-run this program.'''
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'''\nWriting public key to file {name}_pubkey.txt...''')
    with open(f'''{name}_pubkey.txt''', '''w''') as out_file:
        out_file.write(f'''{key_size},{public_key[0]},{public_key[1]}''')

    print(f'''Writing private key to file {name}_privkey.txt...''')
    with open(f'''{name}_privkey.txt''', '''w''') as out_file:
        out_file.write(f'''{key_size},{private_key[0]},{private_key[1]}''')


if __name__ == "__main__":
    main()
94
"""simple docstring""" def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCAmelCase = 6 _UpperCAmelCase = 1 _UpperCAmelCase = 1901 _UpperCAmelCase = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCAmelCase = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCAmelCase = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
260
0
from math import pi

__all__ = ["_A"]


def _A(inductance: float, frequency: float, reactance: float) -> dict:
    """Solve X_L = 2*pi*f*L for whichever of the three quantities is 0.

    Exactly one of inductance (henries), frequency (hertz) or inductive
    reactance (ohms) must be passed as 0; the returned single-entry dict maps
    that quantity's name to its computed value.

    Raises:
        ValueError: if not exactly one argument is 0, or any is negative.
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
95
"""simple docstring""" from __future__ import annotations import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [n] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if len(str(_SCREAMING_SNAKE_CASE ) ) > 3: if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ): return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(_SCREAMING_SNAKE_CASE ) != count: if validate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE ) if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ): list_truncated_primes.append(_SCREAMING_SNAKE_CASE ) num += 2 return list_truncated_primes def lowercase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(11)) = }''')
260
0
"""simple docstring""" import numpy as np import qiskit def _snake_case ( lowercase__ = 8 , lowercase__ = None ): _lowerCamelCase : str = np.random.default_rng(seed=lowercase__ ) # Roughly 25% of the qubits will contribute to the key. # So we take more than we need. _lowerCamelCase : List[str] = 6 * key_len # Measurement basis for Alice's qubits. _lowerCamelCase : int = rng.integers(2 , size=lowercase__ ) # The set of states Alice will prepare. _lowerCamelCase : str = rng.integers(2 , size=lowercase__ ) # Measurement basis for Bob's qubits. _lowerCamelCase : str = rng.integers(2 , size=lowercase__ ) # Quantum Circuit to simulate BB84 _lowerCamelCase : Dict = qiskit.QuantumCircuit(lowercase__ , name='BB84' ) # Alice prepares her qubits according to rules above. for index, _ in enumerate(lowercase__ ): if alice_state[index] == 1: bbaa_circ.x(lowercase__ ) if alice_basis[index] == 1: bbaa_circ.h(lowercase__ ) bbaa_circ.barrier() # Bob measures the received qubits according to rules above. for index, _ in enumerate(lowercase__ ): if bob_basis[index] == 1: bbaa_circ.h(lowercase__ ) bbaa_circ.barrier() bbaa_circ.measure_all() # Simulate the quantum circuit. _lowerCamelCase : List[str] = qiskit.Aer.get_backend('aer_simulator' ) # We only need to run one shot because the key is unique. # Multiple shots will produce the same key. _lowerCamelCase : List[Any] = qiskit.execute(lowercase__ , lowercase__ , shots=1 , seed_simulator=lowercase__ ) # Returns the result of measurement. _lowerCamelCase : Optional[Any] = job.result().get_counts(lowercase__ ).most_frequent() # Extracting the generated key from the simulation results. # Only keep measurement results where Alice and Bob chose the same basis. _lowerCamelCase : Optional[int] = ''.join( [ result_bit for alice_basis_bit, bob_basis_bit, result_bit in zip( lowercase__ , lowercase__ , lowercase__ ) if alice_basis_bit == bob_basis_bit ] ) # Get final key. Pad with 0 if too short, otherwise truncate. 
_lowerCamelCase : Union[str, Any] = gen_key[:key_len] if len(lowercase__ ) >= key_len else gen_key.ljust(lowercase__ , '0' ) return key if __name__ == "__main__": print(F"The generated key is : {bbaa(8, seed=0)}") from doctest import testmod testmod()
96
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = 
list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , 
yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) 
_UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', 
'''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) 
self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: 
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] 
_UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
0
"""Project Euler problem 123: prime square remainders.

Find the least value of n for which the remainder of
(p_n - 1)**n + (p_n + 1)**n divided by p_n**2 first exceeds a limit.
"""
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Yield the prime numbers in increasing order, indefinitely.

    Incremental (lazy) sieve: ``factor_map`` maps each upcoming composite
    number to one of its prime factors, so every candidate is either found
    in the map (composite) or is a freshly discovered prime.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule this factor at its next
            # multiple that is not already claimed by another factor.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: its first unclaimed multiple is its
            # square (smaller multiples have smaller prime factors).
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Return the least odd ``n`` whose prime-square remainder exceeds ``limit``.

    For odd ``n``, ``(p_n - 1)**n + (p_n + 1)**n`` mod ``p_n**2`` equals
    ``2 * n * p_n`` (binomial expansion); for even ``n`` the remainder is the
    constant 2, so even values of ``n`` can be skipped.

    >>> solution(1_000)
    13
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder for even n is just 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
97
"""String "abbreviation" check (capitalize-or-delete dynamic programme)."""


def lowercase(a: str, b: str) -> bool:
    """Return True if ``b`` can be formed from ``a``.

    ``b`` is reachable from ``a`` by upper-casing zero or more lowercase
    letters of ``a`` and deleting every remaining lowercase letter; uppercase
    letters of ``a`` can never be deleted and must all be matched.

    Classic O(n*m) DP: dp[i][j] is True when the first ``i`` characters of
    ``a`` can produce the first ``j`` characters of ``b``.

    >>> lowercase("daBcd", "ABC")
    True
    >>> lowercase("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # empty prefix of `a` yields empty prefix of `b`
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Option 1: capitalize a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Option 2: delete a[i] — only allowed for lowercase letters.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
260
0
"""`datasets-cli convert`: rewrite a TensorFlow Datasets script into a HuggingFace Datasets script."""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace

from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger


# Markers wrapped around lines that mention TFDS-only concepts and therefore
# need a manual follow-up edit after conversion.
HIGHLIGHT_MESSAGE_PRE = '<<<<<<< This should probably be modified because it mentions: '
HIGHLIGHT_MESSAGE_POST = '=======\n>>>>>>>\n'

# TFDS-only identifiers that have no automatic translation.
TO_HIGHLIGHT = [
    'TextEncoderConfig',
    'ByteTextEncoder',
    'SubwordTextEncoder',
    'encoder_config',
    'maybe_build_from_corpus',
    'manual_dir',
]

TO_CONVERT = [
    # (pattern, replacement)
    # Order is important here for some replacements
    (r'tfds\.core', r'datasets'),
    (r'tf\.io\.gfile\.GFile', r'open'),
    (r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
    (r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
    (r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
    (r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
    (r'tfds\.features\.FeaturesDict\(', r'dict('),
    (r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
    (r'tfds\.', r'datasets.'),
    (r'dl_manager\.manual_dir', r'self.config.data_dir'),
    (r'self\.builder_config', r'self.config'),
]


def a_(args: Namespace):
    """argparse factory (used via ``set_defaults(func=...)``) building the command."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    """CLI command converting TFDS dataset scripts to HF Datasets scripts."""

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach the ``convert`` sub-command and its arguments to the root parser."""
        train_parser = parser.add_parser(
            'convert',
            help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.',
        )
        train_parser.add_argument(
            '--tfds_path',
            type=str,
            required=True,
            help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.',
        )
        train_parser.add_argument(
            '--datasets_directory',
            type=str,
            required=True,
            help='Path to the HuggingFace Datasets folder.',
        )
        train_parser.set_defaults(func=a_)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger('datasets-cli/converting')
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        """Convert every eligible ``.py`` file under ``--tfds_path``, writing to ``--datasets_directory``."""
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.')
        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}')

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f'Looking at file {f_name}')
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            # Only convert real dataset scripts, not package/test files.
            if not os.path.isfile(input_file) or '__init__' in f_name or '_test' in f_name or '.py' not in f_name:
                self._logger.info('Skipping file')
                continue

            with open(input_file, encoding='utf-8') as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if 'import tensorflow.compat.v2 as tf' in out_line:
                    continue
                elif '@tfds.core' in out_line:
                    continue
                elif 'builder=self' in out_line:
                    continue
                elif 'import tensorflow_datasets.public_api as tfds' in out_line:
                    out_line = 'import datasets\n'
                elif 'import tensorflow' in out_line:
                    # order is important here
                    out_line = ''
                    continue
                elif 'from absl import logging' in out_line:
                    out_line = 'from datasets import logging\n'
                elif 'getLogger' in out_line:
                    out_line = out_line.replace('getLogger', 'get_logger')
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    # Line mentions a TFDS-only concept: keep it, but wrap it
                    # in conflict-style markers so a human revisits it.
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + '\n')
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if 'tensorflow_datasets' in out_line:
                    match = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)', out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(','))
                    out_line = 'from . import ' + match.group(1)

                # Check we have not forget anything
                if 'tf.' in out_line or 'tfds.' in out_line or 'tensorflow_datasets' in out_line:
                    raise ValueError(f'Error converting {out_line.strip()}')

                if 'GeneratorBasedBuilder' in out_line or 'BeamBasedBuilder' in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or 'wmt' in f_name:
                # We create a new directory for each dataset
                dataset_name = f_name.replace('.py', '')
                output_dir = os.path.join(abs_datasets_path, dataset_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f'Adding directory {output_dir}')
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, 'w', encoding='utf-8') as f:
                f.writelines(out_lines)
            self._logger.info(f'Converted in {output_file}')

        # Move utility files next to the builder that imports them.
        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace('.py', '')]
                self._logger.info(f'Moving {dest_folder} to {utils_file}')
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.')

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.'
                )
98
"""Quicksort with a randomly chosen pivot (Lomuto partition)."""
import random


def partition(a: list, left_index: int, right_index: int) -> int:
    """Partition ``a[left_index:right_index]`` around ``a[left_index]``.

    Elements smaller than the pivot end up left of it. Returns the pivot's
    final index. ``right_index`` is exclusive.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    # Put the pivot between the smaller and the not-smaller elements.
    a[i - 1], a[left_index] = a[left_index], a[i - 1]
    return i - 1


def quick_sort_random(a: list, left: int, right: int) -> None:
    """Sort ``a[left:right]`` in place; ``right`` is exclusive.

    A random pivot is swapped to the front before partitioning, which keeps
    the expected running time O(n log n) even on sorted input.
    """
    if left < right:
        pivot = random.randint(left, right - 1)  # randint bounds are inclusive
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(a, left, pivot_index)  # recursive quicksort to the left of the pivot point
        quick_sort_random(a, pivot_index + 1, right)  # recursive quicksort to the right of the pivot point


def main() -> None:
    """Read comma-separated integers from stdin, sort them, and print the result."""
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
260
0
import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowercase : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class A__ ( __UpperCAmelCase , unittest.TestCase ): """simple docstring""" __A : List[Any] = AlbertTokenizer __A : Dict = AlbertTokenizerFast __A : Optional[int] = True __A : Tuple = True __A : List[Any] = True def __lowercase ( self) -> Optional[Any]: '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing a__ : List[str] = AlbertTokenizer(lowercase) tokenizer.save_pretrained(self.tmpdirname) def __lowercase ( self , lowercase) -> str: '''simple docstring''' a__ : str = 'this is a test' a__ : str = 'this is a test' return input_text, output_text def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' a__ : int = '<pad>' a__ : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase) , lowercase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase) , lowercase) def __lowercase ( self) -> Any: '''simple docstring''' a__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<pad>') self.assertEqual(vocab_keys[1] , '<unk>') self.assertEqual(vocab_keys[-1] , '▁eloquent') self.assertEqual(len(lowercase) , 3_0000) def __lowercase ( self) -> Tuple: '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 3_0000) def __lowercase ( self) -> Union[str, Any]: '''simple docstring''' if not self.test_rust_tokenizer: return a__ : Any = self.get_tokenizer() a__ : Optional[Any] = self.get_rust_tokenizer() a__ : int = 'I was born in 92000, and this is falsé.' 
a__ : Optional[int] = tokenizer.tokenize(lowercase) a__ : Dict = rust_tokenizer.tokenize(lowercase) self.assertListEqual(lowercase , lowercase) a__ : Optional[int] = tokenizer.encode(lowercase , add_special_tokens=lowercase) a__ : Tuple = rust_tokenizer.encode(lowercase , add_special_tokens=lowercase) self.assertListEqual(lowercase , lowercase) a__ : Optional[int] = self.get_rust_tokenizer() a__ : Optional[int] = tokenizer.encode(lowercase) a__ : Optional[Any] = rust_tokenizer.encode(lowercase) self.assertListEqual(lowercase , lowercase) def __lowercase ( self) -> str: '''simple docstring''' a__ : Dict = AlbertTokenizer(lowercase , keep_accents=lowercase) a__ : Dict = tokenizer.tokenize('This is a test') self.assertListEqual(lowercase , ['▁this', '▁is', '▁a', '▁test']) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase) , [48, 25, 21, 1289]) a__ : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( lowercase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.']) a__ : Optional[Any] = tokenizer.convert_tokens_to_ids(lowercase) self.assertListEqual(lowercase , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]) a__ : List[Any] = tokenizer.convert_ids_to_tokens(lowercase) self.assertListEqual( lowercase , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , ) def __lowercase ( self) -> str: '''simple docstring''' a__ : Union[str, Any] = AlbertTokenizer(lowercase) a__ : Any = tokenizer.encode('sequence builders') a__ : Union[str, Any] = tokenizer.encode('multi-sequence build') a__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowercase) a__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + 
[ tokenizer.sep_token_id ] @slow def __lowercase ( self) -> Optional[int]: '''simple docstring''' a__ : Dict = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 
9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowercase , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
99
"""Perplexity metric: exponentiated average negative log-likelihood under a causal LM."""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = "\\n\n"

_DESCRIPTION = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"

_KWARGS_DESCRIPTION = "\nArgs:\n    model_id (str): model used for calculating Perplexity\n            NOTE: Perplexity can only be calculated for causal language models.\n                    This includes models such as gpt2, causal variations of bert,\n                    causal versions of t5, and more (the full list can be found\n                    in the AutoModelForCausalLM documentation here:\n                    https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n    input_texts (list of str): input text, each separate text snippet\n        is one list entry.\n    batch_size (int): the batch size to run texts through the model. Defaults to 16.\n    add_start_token (bool): whether to add the start token to the texts,\n        so the perplexity can include the probability of the first word. Defaults to True.\n    device (str): device to run on, defaults to 'cuda' when available\nReturns:\n    perplexity: dictionary containing the perplexity scores for the texts\n        in the input list, as well as the mean perplexity. If one of the input texts is\n        longer than the max input length of the model, then it is truncated to the\n        max length for the perplexity computation.\nExamples:\n    Example 1:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              add_start_token=False,\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        78.22\n        >>> print(round(results[\"perplexities\"][0], 2))\n        11.11\n\n    Example 2:\n        >>> perplexity = datasets.load_metric(\"perplexity\")\n        >>> input_texts = datasets.load_dataset(\"wikitext\",\n        ...                                     \"wikitext-2-raw-v1\",\n        ...                                     split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n        [...]\n        >>> input_texts = [s for s in input_texts if s!='']\n        >>> results = perplexity.compute(model_id='gpt2',\n        ...                              input_texts=input_texts) # doctest:+ELLIPSIS\n        >>> print(list(results.keys()))\n        ['perplexities', 'mean_perplexity']\n        >>> print(round(results[\"mean_perplexity\"], 2))\n        60.35\n        >>> print(round(results[\"perplexities\"][0], 2))\n        81.12\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _a(datasets.Metric):
    """Metric computing per-text and mean perplexity under a causal language model."""

    def _info(self):
        """Describe the metric: docs, input features, and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'input_texts': datasets.Value('string'),
                }
            ),
            reference_urls=['https://huggingface.co/docs/transformers/perplexity'],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Compute the perplexity of each text in ``input_texts`` under ``model_id``.

        Returns a dict with per-text ``"perplexities"`` and their ``"mean_perplexity"``.
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                # torch names GPU devices "cuda"
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors='pt',
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings['input_ids']
        attn_masks = encodings['attention_mask']

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 1)
            ), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction='none')

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                # prepend one BOS token per row and extend the mask accordingly
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # shift so token t predicts token t+1, masking out padding positions
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
260
0
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __magic_name__ = "2.13.1" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("3.7"): raise ImportWarning( "To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition." ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( "To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n" "If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`." ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils 
import download_manager as _deprecated_download_manager # isort:skip __magic_name__ = concatenate_datasets __magic_name__ = DownloadConfig __magic_name__ = DownloadManager __magic_name__ = DownloadMode __magic_name__ = DownloadConfig __magic_name__ = DownloadMode __magic_name__ = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
100
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins __A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.getbasetemp() / '''cache''' _UpperCAmelCase = test_hf_cache_home / '''datasets''' _UpperCAmelCase = test_hf_cache_home / '''metrics''' _UpperCAmelCase = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='''session''' ) def lowercase ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def lowercase ( 
_SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , _SCREAMING_SNAKE_CASE )
260
0
from typing import Optional, Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)


@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of `FlaxControlNetModel`.

    Attributes:
        down_block_res_samples: residual activations of each down block,
            to be added to the UNet's down-block residuals.
        mid_block_res_sample: residual activation of the mid block.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    """Small conv network that embeds the conditioning image (e.g. edges,
    depth) into the same feature space as the UNet's first block."""

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            # Strided conv halves the spatial resolution between channel stages.
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        # Zero-initialized so the ControlNet starts as a no-op.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding


@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    """Flax ControlNet: a copy of the UNet's down/mid path whose (zero-initialized)
    output convolutions produce residuals that steer a frozen UNet."""

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # Init input tensors with the shapes the model expects at call time.
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        # Conditioning image is in pixel space: 8x the latent resolution.
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            # One zero conv per resnet layer, plus one for the downsampler.
            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding="VALID",
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )

        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding="VALID",
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        """Run the ControlNet and return scaled residuals for the UNet.

        Returns a `FlaxControlNetOutput` (or a plain tuple when
        `return_dict=False`) of down-block residuals and the mid-block residual.
        """
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (NCHW -> NHWC for Flax convs)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks (zero convs over each residual)
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
101
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) <= 1: return lst _UpperCAmelCase = 1 while i < len(_SCREAMING_SNAKE_CASE ): if lst[i - 1] <= lst[i]: i += 1 else: _UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: _UpperCAmelCase = 1 return lst if __name__ == "__main__": __A : Dict = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
260
0
"""simple docstring""" import inspect import os import unittest import torch import accelerate from accelerate import debug_launcher from accelerate.test_utils import ( execute_subprocess_async, require_cpu, require_huggingface_suite, require_multi_gpu, require_single_gpu, ) from accelerate.utils import patch_environment @require_huggingface_suite class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' __snake_case : int = inspect.getfile(accelerate.test_utils ) __snake_case : List[Any] = os.path.sep.join( mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] ) from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401 __snake_case : Any = test_metrics @require_cpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' debug_launcher(self.test_metrics.main , num_processes=1 ) @require_cpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' debug_launcher(self.test_metrics.main ) @require_single_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.test_metrics.main() @require_multi_gpu def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' print(f"""Found {torch.cuda.device_count()} devices.""" ) __snake_case : str = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path] with patch_environment(omp_num_threads=1 ): execute_subprocess_async(a_ , env=os.environ.copy() )
102
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __A : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 1_6000 ): '''simple docstring''' _UpperCAmelCase = int(round(sample_rate * max_length ) ) if len(_SCREAMING_SNAKE_CASE ) <= sample_length: return wav _UpperCAmelCase = randint(0 , len(_SCREAMING_SNAKE_CASE ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _a : """simple docstring""" UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""}) UpperCamelCase__ = field( default="""train""" , metadata={ """help""": """The name of the 
training data set split to use (via the datasets library). Defaults to 'train'""" } , ) UpperCamelCase__ = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) UpperCamelCase__ = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) UpperCamelCase__ = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""}) UpperCamelCase__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""}) UpperCamelCase__ = field( default=lowerCAmelCase , 
metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def lowercase__ ( self : Optional[Any] )->int: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. _UpperCAmelCase = DatasetDict() _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--label_column_name` to the correct text column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
_UpperCAmelCase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _UpperCAmelCase = feature_extractor.model_input_names[0] def train_transforms(_SCREAMING_SNAKE_CASE : Tuple ): _UpperCAmelCase = [] for audio in batch[data_args.audio_column_name]: _UpperCAmelCase = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ): _UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names _UpperCAmelCase , _UpperCAmelCase = {}, {} for i, label in enumerate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = label # Load the accuracy metric from the datasets package _UpperCAmelCase = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(_SCREAMING_SNAKE_CASE : List[str] ): _UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids ) _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _UpperCAmelCase = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if data_args.max_eval_samples is not None: _UpperCAmelCase = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) # Initialize our trainer _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if 
training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub _UpperCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
import argparse
import json

import numpy
import torch

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


logging.set_verbosity_info()


def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into a Transformers-style dump.

    Writes three files into ``pytorch_dump_folder_path``: the model weights
    (``WEIGHTS_NAME``), the JSON configuration (``CONFIG_NAME``) and the
    vocabulary file.

    Args:
        xlm_checkpoint_path: Path to the official XLM ``.pth`` checkpoint.
        pytorch_dump_folder_path: Output directory (assumed to exist).

    BUG FIX: the function was previously defined with two parameters of the
    same name (a SyntaxError) and under a different name than the one invoked
    from the ``__main__`` guard; local results were also never bound to the
    names the body reads (``state_dict``, ``config``, ``vocab``).
    """
    # Load checkpoint on CPU so no GPU is required for the conversion.
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository:
    # head weights keep their key, everything else is re-keyed under "transformer.".
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # Drop tensor-valued entries: only JSON-serializable values belong in the config.
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE bookkeeping: strip "@@" continuation markers; word-final tokens past the
    # special-token range (index > 13) get the "</w>" end-of-word suffix.
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")

    # BUG FIX: this message previously printed the config path instead of the vocab path.
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
103
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in 
range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : 
Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) 
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: 
self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
0
'''simple docstring'''
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


# NOTE(review): identifiers in this script look machine-mangled — all four
# functions are named ``_A`` (later defs shadow earlier ones), the dataloader
# builder declares five parameters all named ``A__`` (a SyntaxError), and every
# assignment target was collapsed to ``__lowercase``, so names the code reads
# (``tokenizer``, ``datasets``, ``parser``, ``args``, ``bamb``, ``main``,
# ``training_function``, ``get_dataloaders``, ``TorchTracemalloc`` …) are never
# bound. The code is preserved byte-for-byte; comments describe apparent intent.

lowerCAmelCase__ = 16
lowerCAmelCase__ = 32


def _A(A__):
    """Convert a byte count to whole mebibytes (bytes-to-MB helper)."""
    return int(x / 2**20)


class lowercase_:
    """Context manager that tracks peak CUDA memory between enter and exit
    (apparently the upstream ``TorchTracemalloc``)."""

    def __enter__(self: List[str]):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        __lowercase = torch.cuda.memory_allocated()
        return self

    def __exit__(self: Tuple, *lowercase__: int):
        gc.collect()
        torch.cuda.empty_cache()
        __lowercase = torch.cuda.memory_allocated()
        __lowercase = torch.cuda.max_memory_allocated()
        # Deltas relative to the allocation level recorded on __enter__.
        __lowercase = bamb(self.end - self.begin)
        __lowercase = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def _A(A__, A__=16, A__="bert-base-cased", A__=320, A__=160):
    """Build GLUE/MRPC train and eval DataLoaders for the given tokenizer name,
    batch size and train/val split sizes (apparently ``get_dataloaders``)."""
    __lowercase = AutoTokenizer.from_pretrained(A__)
    __lowercase = load_dataset(
        '''glue''', '''mrpc''', split={'''train''': F"train[:{n_train}]", '''validation''': F"validation[:{n_val}]"}
    )

    def tokenize_function(A__):
        # max_length=None => use the model max length (it's actually the default)
        __lowercase = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=A__, max_length=A__)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    __lowercase = datasets.map(
        A__, batched=A__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=A__
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    __lowercase = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(A__):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(A__, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(A__, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders.
    __lowercase = DataLoader(tokenized_datasets['''train'''], shuffle=A__, collate_fn=A__, batch_size=A__)
    __lowercase = DataLoader(tokenized_datasets['''validation'''], shuffle=A__, collate_fn=A__, batch_size=A__)

    return train_dataloader, eval_dataloader


def _A(A__, A__):
    """Train the model while reporting per-epoch peak GPU memory; optionally
    assert an upper bound and dump the stats as JSON (``training_function``)."""
    __lowercase = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    __lowercase = config['''lr''']
    __lowercase = int(config['''num_epochs'''])
    __lowercase = int(config['''seed'''])
    __lowercase = int(config['''batch_size'''])
    __lowercase = args.model_name_or_path

    set_seed(A__)
    __lowercase, __lowercase = get_dataloaders(A__, A__, A__, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    __lowercase = AutoModelForSequenceClassification.from_pretrained(A__, return_dict=A__)

    # Instantiate optimizer
    __lowercase = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    __lowercase = optimizer_cls(params=model.parameters(), lr=A__)

    if accelerator.state.deepspeed_plugin is not None:
        __lowercase = accelerator.state.deepspeed_plugin.deepspeed_config['''gradient_accumulation_steps''']
    else:
        __lowercase = 1
    __lowercase = (len(A__) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        __lowercase = get_linear_schedule_with_warmup(
            optimizer=A__,
            num_warmup_steps=0,
            num_training_steps=A__,
        )
    else:
        __lowercase = DummyScheduler(A__, total_num_steps=A__, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    __lowercase, __lowercase, __lowercase, __lowercase, __lowercase = accelerator.prepare(A__, A__, A__, A__, A__)

    # We need to keep track of how many total steps we have iterated over
    __lowercase = 0
    # We also need to keep track of the stating epoch so files are named properly
    __lowercase = 0

    # Now we train the model
    __lowercase = {}
    for epoch in range(A__, A__):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(A__):
                __lowercase = model(**A__)
                __lowercase = outputs.loss
                __lowercase = loss / gradient_accumulation_steps
                accelerator.backward(A__)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('''Memory before entering the train : {}'''.format(bamb(tracemalloc.begin)))
        accelerator.print('''Memory consumed at the end of the train (end-begin): {}'''.format(tracemalloc.used))
        accelerator.print('''Peak Memory consumed during the train (max-begin): {}'''.format(tracemalloc.peaked))
        accelerator.print(
            '''Total Peak Memory consumed during the train (max): {}'''.format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        __lowercase = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[F"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, '''peak_memory_utilization.json'''), '''w''') as f:
            json.dump(A__, A__)


def _A():
    """CLI entry point: parse arguments and launch the training function."""
    __lowercase = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''')
    parser.add_argument(
        '''--model_name_or_path''',
        type=A__,
        default='''bert-base-cased''',
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
        required=A__,
    )
    parser.add_argument(
        '''--output_dir''',
        type=A__,
        default='''.''',
        help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''',
    )
    parser.add_argument(
        '''--peak_memory_upper_bound''',
        type=A__,
        default=A__,
        help='''The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.''',
    )
    parser.add_argument(
        '''--n_train''',
        type=A__,
        default=320,
        help='''Number of training examples to use.''',
    )
    parser.add_argument(
        '''--n_val''',
        type=A__,
        default=160,
        help='''Number of validation examples to use.''',
    )
    parser.add_argument(
        '''--num_epochs''',
        type=A__,
        default=1,
        help='''Number of train epochs.''',
    )
    __lowercase = parser.parse_args()
    __lowercase = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(A__, A__)


if __name__ == "__main__":
    main()
104
"""simple docstring""" from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : float )->float: return 0.0 def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = 20 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _UpperCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) 
plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
260
0
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class __UpperCamelCase ( unittest.TestCase ): def __a ( self ) -> str: a : Dict = 0 def __a ( self ) -> List[str]: a : List[str] = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Any: with tempfile.TemporaryDirectory() as tmpdirname: a : List[str] = Path(lowerCAmelCase__ ) / "preprocessor_config.json" a : Optional[Any] = Path(lowerCAmelCase__ ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCAmelCase__ , "w" ) ) a : Dict = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Optional[Any]: # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: a : Union[str, Any] = Path(lowerCAmelCase__ ) / "preprocessor_config.json" a : str = Path(lowerCAmelCase__ ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCAmelCase__ , "w" ) ) a : Optional[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Any: with 
tempfile.TemporaryDirectory() as tmpdirname: a : Optional[Any] = CLIPConfig() # Create a dummy config file with image_proceesor_type a : List[str] = Path(lowerCAmelCase__ ) / "preprocessor_config.json" a : Optional[Any] = Path(lowerCAmelCase__ ) / "config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCAmelCase__ , "w" ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally a : List[str] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ).to_dict() config_dict.pop("image_processor_type" ) a : Optional[int] = CLIPImageProcessor(**lowerCAmelCase__ ) # save in new folder model_config.save_pretrained(lowerCAmelCase__ ) config.save_pretrained(lowerCAmelCase__ ) a : str = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) # make sure private variable is not incorrectly saved a : List[Any] = json.loads(config.to_json_string() ) self.assertTrue("_processor_class" not in dict_as_saved ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdirname: a : Dict = Path(lowerCAmelCase__ ) / "preprocessor_config.json" json.dump( {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) a : List[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) def __a ( self ) -> Any: with self.assertRaisesRegex( lowerCAmelCase__ , "clip-base is not a local folder and is not a valid model identifier" ): a : Union[str, Any] = AutoImageProcessor.from_pretrained("clip-base" ) def __a ( self ) -> Union[str, Any]: with self.assertRaisesRegex( lowerCAmelCase__ , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ): a : Union[str, Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ 
, revision="aaaaaa" ) def __a ( self ) -> Union[str, Any]: with self.assertRaisesRegex( lowerCAmelCase__ , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ): a : Optional[int] = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" ) def __a ( self ) -> Any: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(lowerCAmelCase__ ): a : Union[str, Any] = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCAmelCase__ ): a : Tuple = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase__ ) a : Tuple = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(lowerCAmelCase__ ) a : Dict = AutoImageProcessor.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" ) def __a ( self ) -> int: try: AutoConfig.register("custom" , lowerCAmelCase__ ) AutoImageProcessor.register(lowerCAmelCase__ , lowerCAmelCase__ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCAmelCase__ ): AutoImageProcessor.register(lowerCAmelCase__ , lowerCAmelCase__ ) with tempfile.TemporaryDirectory() as tmpdirname: a : List[Any] = Path(lowerCAmelCase__ ) / "preprocessor_config.json" a : Any = Path(lowerCAmelCase__ ) / "config.json" json.dump( {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(lowerCAmelCase__ , "w" ) , ) json.dump({"model_type": "clip"} , open(lowerCAmelCase__ , "w" ) ) a : Optional[int] = CustomImageProcessor.from_pretrained(lowerCAmelCase__ ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(lowerCAmelCase__ ) a : List[Any] = AutoImageProcessor.from_pretrained(lowerCAmelCase__ ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def __a ( self ) -> int: class __UpperCamelCase ( a__ ): lowerCamelCase : Tuple =True try: AutoConfig.register("custom" , lowerCAmelCase__ ) AutoImageProcessor.register(lowerCAmelCase__ , lowerCAmelCase__ ) # If remote code is not set, the default is to use local a : Tuple = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" ) 
self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. a : Optional[int] = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub a : Optional[Any] = AutoImageProcessor.from_pretrained( "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=lowerCAmelCase__ ) self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" ) self.assertTrue(not hasattr(lowerCAmelCase__ , "is_local" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
105
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """camembert""" def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=3_0_5_2_2 , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=None , **__UpperCamelCase : Optional[Any] , )->str: super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = 
position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class _a ( lowerCAmelCase): """simple docstring""" @property def lowercase__ ( self : int )->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
260
0
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ ): if a < 0: raise ValueError('''Input value must be a positive integer''' ) elif isinstance(A_ , A_ ): raise TypeError('''Input value must be a \'int\' type''' ) return bin(A_ ).count('''1''' ) if __name__ == "__main__": import doctest doctest.testmod()
106
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[str] = { "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """poolformer""" def __init__( self : List[str] , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : str=1_6 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : int=4.0 , __UpperCamelCase : str=[2, 2, 6, 2] , __UpperCamelCase : Tuple=[6_4, 1_2_8, 3_2_0, 5_1_2] , __UpperCamelCase : int=[7, 3, 3, 3] , __UpperCamelCase : str=[4, 2, 2, 2] , __UpperCamelCase : Union[str, Any]=[2, 1, 1, 1] , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : str=0.0_2 , **__UpperCamelCase : List[Any] , )->Dict: _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = stride _UpperCAmelCase = padding _UpperCAmelCase = pool_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = mlp_ratio _UpperCAmelCase = depths _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_layer_scale _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = initializer_range super().__init__(**__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = version.parse("""1.11""") @property def lowercase__ ( self : Union[str, Any] )->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: 
'''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase__ ( self : Tuple )->float: return 2e-3
260
0
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device __lowerCAmelCase : Optional[int] = False class snake_case__ (unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self : Dict ) -> int: a = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" ) # remove text_unet pipe.remove_unused_weights() pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = "A painting of a squirrel eating a burger " a = torch.manual_seed(0 ) a = pipe( prompt=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__lowerCamelCase ) a = VersatileDiffusionTextToImagePipeline.from_pretrained(__lowerCamelCase ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = generator.manual_seed(0 ) a = pipe( prompt=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __UpperCAmelCase ( self : str ) -> List[str]: a = VersatileDiffusionTextToImagePipeline.from_pretrained( "shi-labs/versatile-diffusion" , torch_dtype=torch.floataa ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) a = "A painting of a squirrel eating a burger " a = torch.manual_seed(0 ) a = pipe( prompt=__lowerCamelCase , generator=__lowerCamelCase , guidance_scale=7.5 , num_inference_steps=50 , 
output_type="numpy" ).images a = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) a = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
107
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : Union[str, Any] = 16 __A : Optional[Any] = 32 def lowercase ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ): '''simple docstring''' _UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_SCREAMING_SNAKE_CASE : Optional[int] ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # 
starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_SCREAMING_SNAKE_CASE : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( _SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : Optional[int] = mocked_dataloaders # noqa: F811 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _SCREAMING_SNAKE_CASE ) == "1": _UpperCAmelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config['''lr'''] _UpperCAmelCase = int(config['''num_epochs'''] ) _UpperCAmelCase = int(config['''seed'''] ) _UpperCAmelCase = int(config['''batch_size'''] ) set_seed(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase = MAX_GPU_BATCH_SIZE # 
Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _UpperCAmelCase = os.path.split(_SCREAMING_SNAKE_CASE )[-1].split('''.''' )[0] accelerator.init_trackers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _UpperCAmelCase = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , _SCREAMING_SNAKE_CASE ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(_SCREAMING_SNAKE_CASE ), '''epoch''': epoch, } , step=_SCREAMING_SNAKE_CASE , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. 
Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=_SCREAMING_SNAKE_CASE , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
"""simple docstring""" lowerCAmelCase__ = 8.314462 # Unit - J mol-1 K-1 def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ): '''simple docstring''' if moles < 0 or kelvin < 0 or volume < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def a__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ): '''simple docstring''' if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError("Invalid inputs. Enter positive value." ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
108
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
260
0
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class SCREAMING_SNAKE_CASE__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , ) -> List[str]: '''simple docstring''' UpperCAmelCase : str = parent UpperCAmelCase : str = batch_size UpperCAmelCase : int = image_size UpperCAmelCase : str = patch_size UpperCAmelCase : Any = num_channels UpperCAmelCase : Dict = embed_dim UpperCAmelCase : Any = depths UpperCAmelCase : str = num_heads UpperCAmelCase : Optional[Any] = window_size UpperCAmelCase : int = mlp_ratio UpperCAmelCase : Any = qkv_bias UpperCAmelCase : 
Union[str, Any] = hidden_dropout_prob UpperCAmelCase : List[Any] = attention_probs_dropout_prob UpperCAmelCase : Any = drop_path_rate UpperCAmelCase : List[Any] = hidden_act UpperCAmelCase : Tuple = use_absolute_embeddings UpperCAmelCase : Optional[Any] = patch_norm UpperCAmelCase : Tuple = layer_norm_eps UpperCAmelCase : List[Any] = initializer_range UpperCAmelCase : Optional[int] = is_training UpperCAmelCase : Any = scope UpperCAmelCase : List[str] = use_labels UpperCAmelCase : Union[str, Any] = type_sequence_label_size UpperCAmelCase : Any = encoder_stride def SCREAMING_SNAKE_CASE ( self ) -> Any: '''simple docstring''' UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase : Optional[Any] = None if self.use_labels: UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Any = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' UpperCAmelCase : Tuple = SwinvaModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase : str = model(_SCREAMING_SNAKE_CASE ) 
UpperCAmelCase : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' UpperCAmelCase : int = SwinvaForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase : List[Any] = 1 UpperCAmelCase : Tuple = SwinvaForMaskedImageModeling(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.type_sequence_label_size UpperCAmelCase : str = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() UpperCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[int] = config_and_inputs UpperCAmelCase : Dict = {"""pixel_values""": pixel_values} return 
config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): __lowerCAmelCase : Tuple = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) __lowerCAmelCase : Union[str, Any] = ( {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification} if is_torch_available() else {} ) __lowerCAmelCase : Dict = False __lowerCAmelCase : Optional[Any] = False __lowerCAmelCase : Optional[int] = False __lowerCAmelCase : str = False def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase : str = SwinvaModelTester(self ) UpperCAmelCase : Tuple = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 ) def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE ( self ) -> int: '''simple docstring''' UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' pass def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE ) 
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase : List[str] = [*signature.parameters.keys()] UpperCAmelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Tuple: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[str] = True for model_class in self.all_model_classes: UpperCAmelCase : str = True UpperCAmelCase : str = False UpperCAmelCase : Dict = True UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Any = outputs.attentions UpperCAmelCase : Tuple = len(self.model_tester.depths ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase : Any = True UpperCAmelCase : Optional[int] = config.window_size**2 UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Dict = outputs.attentions 
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) UpperCAmelCase : Any = len(_SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine UpperCAmelCase : int = True UpperCAmelCase : int = True UpperCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCAmelCase : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): UpperCAmelCase : List[Any] = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states UpperCAmelCase : List[Any] = 2 self.assertEqual(out_len + added_hidden_states , len(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : Optional[int] = outputs.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]: '''simple docstring''' UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): UpperCAmelCase : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) UpperCAmelCase : List[Any] = outputs.hidden_states UpperCAmelCase : Optional[Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # Swinv2 has a different seq_length UpperCAmelCase : Union[str, Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, 
config.patch_size) ) UpperCAmelCase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) UpperCAmelCase : List[str] = outputs.reshaped_hidden_states self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = reshaped_hidden_states[0].shape UpperCAmelCase : List[str] = ( reshaped_hidden_states[0].view(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def SCREAMING_SNAKE_CASE ( self ) -> Dict: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase : List[str] = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Optional[Any] = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> List[str]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : List[str] = 3 UpperCAmelCase : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase : Dict = ( 
config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase : List[Any] = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase : Union[str, Any] = True self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) ) def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: '''simple docstring''' UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE ) @slow def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Tuple = SwinvaModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def SCREAMING_SNAKE_CASE ( self ) -> List[Any]: '''simple docstring''' UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : Any = _config_zero_init(_SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: UpperCAmelCase : List[str] = model_class(config=_SCREAMING_SNAKE_CASE ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and 
param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @require_vision @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @cached_property def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: '''simple docstring''' return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self ) -> str: '''simple docstring''' UpperCAmelCase : Union[str, Any] = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( _SCREAMING_SNAKE_CASE ) UpperCAmelCase : List[str] = self.default_image_processor UpperCAmelCase : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) UpperCAmelCase : Tuple = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): UpperCAmelCase : Dict = model(**_SCREAMING_SNAKE_CASE ) # verify the logits UpperCAmelCase : int = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) UpperCAmelCase : str = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
109
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , 
'''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, 
args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
"""simple docstring""" import argparse from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird from transformers.utils import logging logging.set_verbosity_info() def lowercase ( __snake_case : Dict , __snake_case : List[Any] , __snake_case : List[str] , __snake_case : List[Any] ): lowercase_ : Optional[int] = BigBirdConfig.from_json_file(_SCREAMING_SNAKE_CASE ) print(F'''Building PyTorch model from configuration: {config}''' ) if is_trivia_qa: lowercase_ : Dict = BigBirdForQuestionAnswering(_SCREAMING_SNAKE_CASE ) else: lowercase_ : Tuple = BigBirdForPreTraining(_SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_big_bird(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , is_trivia_qa=_SCREAMING_SNAKE_CASE ) # Save pytorch-model print(F'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--big_bird_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_trivia_qa''', action='''store_true''', help='''Whether to convert a model with a trivia_qa head.''' ) __A : List[str] = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa )
33
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( _SCREAMING_SNAKE_CASE : Features ): '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(_SCREAMING_SNAKE_CASE : FeatureType ) -> None: nonlocal batch_size if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : int , )->Union[str, Any]: super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) 
_UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCAmelCase = Parquet( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , hash=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Union[str, Any] )->Dict: # Build iterable dataset if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Dataset , __UpperCamelCase : Union[PathLike, BinaryIO] , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Tuple , )->Optional[int]: _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: _UpperCAmelCase = self._write(file_obj=__UpperCamelCase , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : int , __UpperCamelCase : BinaryIO , __UpperCamelCase : int , **__UpperCamelCase 
: int )->int: _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCamelCase ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase , **__UpperCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCamelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(__UpperCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCamelCase ) written += batch.nbytes writer.close() return written
260
0
"""simple docstring""" import numpy as np def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]: a__: Any = int(np.ceil((x_end - xa) / h ) ) a__: Optional[int] = np.zeros((n + 1,) ) a__: Optional[int] = ya a__: Union[str, Any] = xa for k in range(_SCREAMING_SNAKE_CASE ): a__: List[str] = f(_SCREAMING_SNAKE_CASE , y[k] ) a__: List[Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) a__: Any = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) a__: Tuple = f(x + h , y[k] + h * ka ) a__: Union[str, Any] = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
290
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = " " ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 0 for index, char in enumerate(_SCREAMING_SNAKE_CASE ): if char == separator: split_words.append(string[last_index:index] ) _UpperCAmelCase = index + 1 elif index + 1 == len(_SCREAMING_SNAKE_CASE ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
260
0
"""Tests for the PyTorch MobileNetV1 model."""
import inspect
import unittest

from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetVaForImageClassification, MobileNetVaModel
    from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class MobileNetVaConfigTester(ConfigTester):
    """Config tester checking MobileNetV1-specific config attributes."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    """Builds tiny configs/inputs and runs shape checks for the model tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        # The final stage width scales with the depth multiplier.
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MobileNetV1.

    MobileNetV1 does not use attention, so the attention-related common tests
    are disabled below.
    """

    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            # 26 = number of hidden states exposed by the 1.0 MobileNetV1 stack
            # as exercised here (see the original transformers test).
            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO test fixture image."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.17_39, -1.12_33, 3.12_05]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
52
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = args.pruning_method _UpperCAmelCase = args.threshold _UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) _UpperCAmelCase = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) _UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) _UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "bias" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": _UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1 _UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE ) 
_UpperCAmelCase = s * (r - l) + l _UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: _UpperCAmelCase = os.path.join( os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'\nCreated folder {target_model_path}' ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A : Optional[int] = parser.parse_args() main(args)
260
0
def lowerCamelCase ( SCREAMING_SNAKE_CASE ): '''simple docstring''' return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
43
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
0
"""Byte-level BPE tokenizer for Longformer (GPT-2/RoBERTa style vocab + merges).

NOTE(review): this chunk was machine-mangled — every module constant was bound
to one shadowing name, all methods were named ``a__``, and every ``self.`` /
local assignment was redirected to a throwaway identifier, so names such as
``self.encoder`` or ``bytes_to_unicode`` were read but never written.  The
names below are reconstructed from those read sites and from the upstream
RoBERTa tokenizer this code mirrors.
"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json"
        ),
    },
    "merges_file": {
        "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt",
        "allenai/longformer-large-4096": (
            "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-finetuned-triviaqa": (
            "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt"
        ),
        "allenai/longformer-base-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
        "allenai/longformer-large-4096-extra.pos.embd.only": (
            "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/longformer-base-4096": 4_096,
    "allenai/longformer-large-4096": 4_096,
    "allenai/longformer-large-4096-finetuned-triviaqa": 4_096,
    "allenai/longformer-base-4096-extra.pos.embd.only": 4_096,
    "allenai/longformer-large-4096-extra.pos.embd.only": 4_096,
}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a byte -> printable-unicode-character map covering all 256 bytes.

    Printable latin-1 bytes map to themselves; the remaining bytes are shifted
    into the 256+ range so that no byte maps to whitespace/control characters,
    which the BPE merge tables cannot represent.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class A(PreTrainedTokenizer):  # the Longformer tokenizer (mangled class name kept)
    """Byte-level BPE tokenizer with RoBERTa special-token conventions."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load the ``vocab.json``/``merges.txt`` pair and build the BPE tables.

        The ``lstrip``/``rstrip`` flags on the AddedTokens were erased by the
        mangling; they are restored from the upstream RoBERTa tokenizer
        (mask strips the preceding space, the others do not) — TODO confirm.
        """
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary (added tokens excluded)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id map, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the learned BPE merges to one pre-tokenized word; results are cached."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Look up a token's id, falling back to the unknown-token id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Look up the token string for an id (None if the id is unknown)."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Undo the byte-to-unicode mapping and decode the raw bytes back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Format one or two sequences as ``<s> A </s>`` / ``<s> A </s></s> B </s>``."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """RoBERTa-style models do not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word BPEs like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
204
"""Binary morphological dilation demo (applies a structuring element to a test image)."""
from pathlib import Path

import numpy as np


def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    """Convert an (H, W, 3) RGB image to grayscale with the ITU-R 601 luma weights."""
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    """Threshold a grayscale image: True where the value lies in (127, 255]."""
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    """Morphological dilation of a binary ``image`` by ``kernel``; returns 0/1 ints.

    Bug fixed: the mangled original named all three functions ``lowercase``
    (shadowing each other) while the demo below used the real names restored
    here, and the two erased assignment targets (``image_padded[...]`` and
    ``output[y, x]``) are reconstructed from the read sites — TODO confirm the
    padding offset against the upstream algorithm.
    """
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    from PIL import Image  # imported lazily: only the demo needs Pillow

    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png")
260
0
"""Self-training loop for sequence classification.

NOTE(review): this chunk was machine-mangled — both module constants were
named ``A_``, all three dataclasses were named ``lowerCamelCase`` with every
field named ``lowerCamelCase__``, both functions were named ``A``, and every
assignment was discarded into ``SCREAMING_SNAKE_CASE__`` while later code read
the real names (``STModelArguments``, ``logger``, ``create_pseudo_labeled_data``,
``args.<field>`` ...).  All names below are reconstructed from those read
sites; behaviour is otherwise unchanged.
"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Which model to fine-tune during self-training."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Input files and task metadata for training/validation/inference."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Knobs controlling the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: str = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: str = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    # NOTE(review): help text below duplicates the patience help in the mangled
    # source; kept verbatim rather than guessed at.
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Join inference inputs with predictions and write the next pseudo-label file.

    Optionally filters rows by prediction confidence and/or keeps only the
    top ``eval_result`` fraction by probability, then renames ``prediction``
    to ``label`` (mapped through ``id2label``) and shuffles.
    """
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run iterative self-training: fine-tune, pseudo-label, repeat with early stopping."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    # Flatten the three argument dataclasses into one namespace, then let
    # explicit keyword overrides win.
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label  # mangled source read ``config.idalabel``; restored
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
165
"""Audio Spectrogram Transformer (AST) model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): both module-level names were mangled to ``__A``; restored here.
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class _a(PretrainedConfig):  # AST configuration (mangled class name kept)
    """Configuration for the Audio Spectrogram Transformer.

    Bug fixed: the mangled original gave every ``__init__`` parameter the same
    name (``__UpperCamelCase``, a SyntaxError) and discarded every attribute
    assignment into a throwaway local, so the config stored nothing.  The
    parameter names below are reconstructed from the right-hand sides of the
    original ``self.<field> = <field>`` assignments; the undefined base class
    ``lowerCAmelCase`` is replaced by the imported ``PretrainedConfig``.
    """

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Transformer encoder dimensions
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Spectrogram patch embedding
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
260
0
"""Multi-GPU integration tests for accelerate.

Each test shells out via ``torchrun``; the ``__main__`` block at the bottom is
executed once per launched process by ``test_pad_across_processes``.

NOTE(review): the original gave all five methods the same mangled name (later
definitions clobbered earlier ones, so unittest never ran them) and bound every
local to a mangled name while the code read ``mod_file``/``cmd``/``tensor``/...
(NameErrors).  Names below are restored from those use sites.
"""
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment


class lowercase_(unittest.TestCase):
    def setUp(self):
        # Resolve the helper scripts shipped next to accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"]
        )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"]
        )

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-launches THIS file under torchrun, so the __main__ block below
        # runs once per process.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    # Each process builds a tensor whose leading dim depends on its rank, so
    # padding across processes is observable.
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
338
"""Project Euler 19: how many Sundays fell on the first of the month during
the twentieth century (1 Jan 1901 to 31 Dec 2000)?

NOTE(review): the original defined the function under the mangled name
``lowercase`` while ``__main__`` called ``solution()`` (NameError); it also
hand-rolled the Gregorian calendar walk.  Restored the name and delegated the
calendar arithmetic to the stdlib ``datetime`` module.
"""
from datetime import date


def solution() -> int:
    """Return the number of months in 1901-2000 whose first day is a Sunday."""
    # date.weekday(): Monday == 0 ... Sunday == 6.
    return sum(
        1
        for year in range(1901, 2001)
        for month in range(1, 13)
        if date(year, month, 1).weekday() == 6
    )


if __name__ == "__main__":
    print(solution())
260
0
"""Configuration for LLaMA models."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class _a(PretrainedConfig):
    """LLaMA model configuration.

    NOTE(review): the original gave every ``__init__`` parameter the same
    mangled name ``lowercase`` (a duplicate-argument ``SyntaxError``) while the
    body read the real attribute names, and named the validation method ``A``
    although ``__init__`` calls ``self._rope_scaling_validation()``.  Parameter
    names/defaults below are restored from the body's attribute assignments.
    """

    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility: fall back to multi-head attention when
        # no key/value head count is given.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads

        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the ``rope_scaling`` configuration.

        Raises:
            ValueError: if ``rope_scaling`` is not a two-field dict with a
                valid ``type`` and a float ``factor`` > 1.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
34
"""simple docstring""" from __future__ import annotations import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [n] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if len(str(_SCREAMING_SNAKE_CASE ) ) > 3: if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ): return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(_SCREAMING_SNAKE_CASE ) != count: if validate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE ) if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ): list_truncated_primes.append(_SCREAMING_SNAKE_CASE ) num += 2 return list_truncated_primes def lowercase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(11)) = }''')
260
0
"""`recall` metric for the `datasets` library, backed by scikit-learn.

NOTE(review): the original bound all three module string constants to the same
mangled name ``_a`` while the decorator and ``MetricInfo`` referenced
``_DESCRIPTION``/``_KWARGS_DESCRIPTION``/``_CITATION`` (NameError); it also
named every ``_compute`` parameter ``_A`` (duplicate-argument SyntaxError) and
mangled the ``datasets.Metric`` hook names ``_info``/``_compute``.  All names
below are restored from their use sites; the runtime strings are unchanged.
"""
from sklearn.metrics import recall_score

import datasets


_DESCRIPTION = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n    - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {'recall': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {'recall': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {'recall': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric('recall')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n        >>> print(results)\n        {'recall': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {'recall': array([1., 0., 0.])}\n"

_CITATION = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase(datasets.Metric):
    """Recall metric, delegating the computation to `sklearn.metrics.recall_score`."""

    def _info(self):
        # `_info` is the hook name the `datasets.Metric` machinery calls.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """Compute recall; returns a dict with a float (or per-class array)."""
        # sklearn's signature is recall_score(y_true, y_pred, ...), so the
        # references go first.
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
172
# NOTE(review): identifier-mangled copy of the HfArgumentParser unit-test
# module from transformers (test_hf_argparser.py).  Every example dataclass
# has been renamed to the same name `_a` (each redefinition clobbers the
# previous one) and the distinct classes the test methods reference via the
# mangled placeholder `__UpperCamelCase` are no longer recoverable from this
# file, so the module cannot run as-is.  A behavior-preserving rewrite is not
# possible from the visible text alone — restore from the upstream test file
# rather than editing in place.  Left byte-identical below.
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = 
list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , 
yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) 
_UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', 
'''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) 
self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: 
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] 
_UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
0
"""Open the top five Google search results for the command-line query.

NOTE(review): the original imported BeautifulSoup from the misspelled module
``bsa`` (should be ``bs4``) and bound every assignment to the same mangled
name ``_lowerCamelCase`` while the code read ``url``/``res``/``soup``/``links``
(NameErrors).  Names restored from the use sites; behavior otherwise unchanged.
"""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
258
"""Dynamic-programming check whether a string can be abbreviated to another.

NOTE(review): the original declared both parameters with the same mangled name
``_SCREAMING_SNAKE_CASE`` (a duplicate-argument ``SyntaxError``) while the body
read ``a`` and ``b``, and bound ``n``/``m``/``dp``/``dp[0][0]`` all to the same
local, clobbering each other.  Names restored from the use sites; the function
name is kept for callers.
"""


def lowercase(a: str, b: str) -> bool:
    """Return True if `a` can be abbreviated to `b` by upper-casing some of
    its lowercase letters and deleting the remaining lowercase letters.

    >>> lowercase("daBcd", "ABC")
    True
    >>> lowercase("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: the first i chars of `a` can be abbreviated to the first j of `b`.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Upper-case a[i] to match b[j] ...
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # ... or delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
260
0
"""Image processor for video models: resize / center-crop / rescale / normalize frames."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

A__ = logging.get_logger(__name__)


def make_batched(videos):
    """Normalize `videos` into a batch: a list of videos, each a list of frames.

    Fixed: the function was defined under an obfuscated name while `preprocess`
    below calls it as `make_batched`, which raised NameError at runtime; the
    parameter is now `videos`, the name the body actually reads.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos  # already a batch of videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]  # a single video: wrap once
    elif is_valid_image(videos):
        return [[videos]]  # a single frame: wrap twice
    raise ValueError(f"Could not make batched video from {videos}")


class __lowerCAmelCase(BaseImageProcessor):
    """Video image processor.

    Fixed: the base class was the undefined name `lowerCamelCase__`
    (restored to the imported `BaseImageProcessor`); every method shared one
    obfuscated name, and `__init__` repeated the same parameter name, which is
    a SyntaxError — parameter names are restored to the names the bodies read.
    """

    # Name of the tensor the model consumes.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize to `size` — either a {"shortest_edge": n} spec or explicit height/width."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Run the configured transform chain over a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: "ImageInput",
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional["PILImageResampling"] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one or more videos into a `BatchFeature` of pixel values.

        Explicit arguments override the instance defaults set in `__init__`.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
82
"""simple docstring""" import random def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = a[left_index] _UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' if left < right: _UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 ) _UpperCAmelCase , _UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) quick_sort_random( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point quick_sort_random( _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point def lowercase ( ): '''simple docstring''' _UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip() _UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )] quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
"""simple docstring""" from io import BytesIO from typing import List, Union import requests from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_decord_available(): import numpy as np from decord import VideoReader if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING __A : List[str] = logging.get_logger(__name__) @add_end_docstrings(_A ) class _UpperCAmelCase ( _A ): def __init__( self : int , *A : str , **A : List[Any] ) -> Optional[int]: super().__init__(*__UpperCamelCase , **__UpperCamelCase ) requires_backends(self , '''decord''' ) self.check_model_type(__UpperCamelCase ) def A ( self : Optional[int] , A : List[Any]=None , A : int=None , A : Optional[Any]=None ) -> Any: lowercase_ : Dict = {} if frame_sampling_rate is not None: lowercase_ : Optional[int] = frame_sampling_rate if num_frames is not None: lowercase_ : Union[str, Any] = num_frames lowercase_ : Any = {} if top_k is not None: lowercase_ : str = top_k return preprocess_params, {}, postprocess_params def __call__( self : int , A : Union[str, List[str]] , **A : List[str] ) -> Union[str, Any]: return super().__call__(__UpperCamelCase , **__UpperCamelCase ) def A ( self : List[Any] , A : Optional[Any] , A : List[str]=None , A : List[Any]=1 ) -> Tuple: if num_frames is None: lowercase_ : Dict = self.model.config.num_frames if video.startswith('''http://''' ) or video.startswith('''https://''' ): lowercase_ : Union[str, Any] = BytesIO(requests.get(__UpperCamelCase ).content ) lowercase_ : Union[str, Any] = VideoReader(__UpperCamelCase ) videoreader.seek(0 ) lowercase_ : List[Any] = 0 lowercase_ : List[str] = num_frames * frame_sampling_rate - 1 lowercase_ : Tuple = np.linspace(__UpperCamelCase , __UpperCamelCase , num=__UpperCamelCase , dtype=np.intaa ) lowercase_ : Union[str, Any] = videoreader.get_batch(__UpperCamelCase ).asnumpy() lowercase_ : 
Tuple = list(__UpperCamelCase ) lowercase_ : Optional[Any] = self.image_processor(__UpperCamelCase , return_tensors=self.framework ) return model_inputs def A ( self : List[str] , A : str ) -> Optional[int]: lowercase_ : Optional[Any] = self.model(**__UpperCamelCase ) return model_outputs def A ( self : Optional[Any] , A : str , A : Optional[int]=5 ) -> int: if top_k > self.model.config.num_labels: lowercase_ : str = self.model.config.num_labels if self.framework == "pt": lowercase_ : Optional[int] = model_outputs.logits.softmax(-1 )[0] lowercase_ , lowercase_ : Optional[int] = probs.topk(__UpperCamelCase ) else: raise ValueError(F'''Unsupported framework: {self.framework}''' ) lowercase_ : Union[str, Any] = scores.tolist() lowercase_ : Optional[int] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__UpperCamelCase , __UpperCamelCase )]
33
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __A : Union[str, Any] = "\\n\n" __A : Any = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" __A : List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a ( datasets.Metric): """simple docstring""" def lowercase__ ( self : List[Any] )->Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int = 1_6 , __UpperCamelCase : bool = True , __UpperCamelCase : List[Any]=None )->Any: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _UpperCAmelCase = '''cuda''' else: _UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = model.to(__UpperCamelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__UpperCamelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCAmelCase = model.config.max_length - 1 else: _UpperCAmelCase = model.config.max_length _UpperCAmelCase = tokenizer( __UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='''pt''' , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase ) _UpperCAmelCase = encodings['''input_ids'''] _UpperCAmelCase = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." 
else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _UpperCAmelCase = [] _UpperCAmelCase = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ): _UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) ) _UpperCAmelCase = encoded_texts[start_index:end_index] _UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: _UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase ) _UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCamelCase ), attn_mask] , dim=1 ) _UpperCAmelCase = encoded_batch with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits _UpperCAmelCase = out_logits[..., :-1, :].contiguous() _UpperCAmelCase = labels[..., 1:].contiguous() _UpperCAmelCase = attn_mask[..., 1:].contiguous() _UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
260
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt') lowercase__ = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys()) lowercase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def __a ( _SCREAMING_SNAKE_CASE ) ->Any: with open(_SCREAMING_SNAKE_CASE , 'rb' ) as f: a__: Tuple = Image.open(_SCREAMING_SNAKE_CASE ) return im.convert('RGB' ) @dataclass class __snake_case : a__ = field( default=__lowerCAmelCase , metadata={ """help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).""" } , ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} ) a__ = field(default=__lowerCAmelCase , metadata={"""help""": """A folder containing the training data."""} ) a__ = field(default=__lowerCAmelCase , metadata={"""help""": """A folder containing the validation data."""} ) a__ = field( default=0.15 , metadata={"""help""": """Percent to split off of train for 
validation."""} ) a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( 'You must specify either a dataset name from the hub or a train and/or validation directory.') @dataclass class __snake_case : a__ = field( default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(__lowerCAmelCase )} , ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} ) a__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) a__ = field(default=__lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} ) a__ = field( default=__lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) a__ = field( default=__lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def __a ( _SCREAMING_SNAKE_CASE ) ->Union[str, Any]: a__: Optional[Any] = 
torch.stack([example['pixel_values'] for example in examples] ) a__: Tuple = torch.tensor([example['labels'] for example in examples] ) return {"pixel_values": pixel_values, "labels": labels} def __a ( ) ->Dict: a__: List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a__ , a__ , a__: Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a__ , a__ , a__: int = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_image_classification' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() a__: Optional[Any] = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(F'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. 
a__: Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: a__: List[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Initialize our dataset and prepare it for the 'image-classification' task. if data_args.dataset_name is not None: a__: Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task='image-classification' , use_auth_token=True if model_args.use_auth_token else None , ) else: a__: List[str] = {} if data_args.train_dir is not None: a__: Optional[Any] = os.path.join(data_args.train_dir , '**' ) if data_args.validation_dir is not None: a__: Any = os.path.join(data_args.validation_dir , '**' ) a__: List[str] = load_dataset( 'imagefolder' , data_files=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , task='image-classification' , ) # If we don't have a validation split, split off a percentage of train as validation. a__: int = None if 'validation' in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0: a__: Dict = dataset['train'].train_test_split(data_args.train_val_split ) a__: Tuple = split['train'] a__: Any = split['test'] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. 
a__: Union[str, Any] = dataset['train'].features['labels'].names a__ , a__: Optional[int] = {}, {} for i, label in enumerate(_SCREAMING_SNAKE_CASE ): a__: Dict = str(_SCREAMING_SNAKE_CASE ) a__: List[str] = label # Load the accuracy metric from the datasets package a__: Any = evaluate.load('accuracy' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(_SCREAMING_SNAKE_CASE ): return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids ) a__: Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='image-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) a__: Union[str, Any] = AutoModelForImageClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) a__: Dict = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # Define torchvision transforms to be applied to each image. 
if "shortest_edge" in image_processor.size: a__: Any = image_processor.size['shortest_edge'] else: a__: Dict = (image_processor.size['height'], image_processor.size['width']) a__: int = Normalize(mean=image_processor.image_mean , std=image_processor.image_std ) a__: Any = Compose( [ RandomResizedCrop(_SCREAMING_SNAKE_CASE ), RandomHorizontalFlip(), ToTensor(), normalize, ] ) a__: List[Any] = Compose( [ Resize(_SCREAMING_SNAKE_CASE ), CenterCrop(_SCREAMING_SNAKE_CASE ), ToTensor(), normalize, ] ) def train_transforms(_SCREAMING_SNAKE_CASE ): a__: Any = [ _train_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image'] ] return example_batch def val_transforms(_SCREAMING_SNAKE_CASE ): a__: List[str] = [_val_transforms(pil_img.convert('RGB' ) ) for pil_img in example_batch['image']] return example_batch if training_args.do_train: if "train" not in dataset: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: a__: Any = ( dataset['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms dataset["train"].set_transform(_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if "validation" not in dataset: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: a__: Dict = ( dataset['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms dataset["validation"].set_transform(_SCREAMING_SNAKE_CASE ) # Initalize our trainer a__: Tuple = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=dataset['train'] if training_args.do_train else None , eval_dataset=dataset['validation'] if training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: a__: Union[str, Any] = None if 
training_args.resume_from_checkpoint is not None: a__: Optional[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: a__: Union[str, Any] = last_checkpoint a__: Union[str, Any] = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: a__: Tuple = trainer.evaluate() trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub a__: List[Any] = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'image-classification', 'dataset': data_args.dataset_name, 'tags': ['image-classification', 'vision'], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
290
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins __A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.getbasetemp() / '''cache''' _UpperCAmelCase = test_hf_cache_home / '''datasets''' _UpperCAmelCase = test_hf_cache_home / '''metrics''' _UpperCAmelCase = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='''session''' ) def lowercase ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def lowercase ( 
_SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , _SCREAMING_SNAKE_CASE )
260
0
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def A_(_lowerCAmelCase) -> None:
    """Seed Python, NumPy and torch RNGs with the same value for reproducibility.

    Args:
        _lowerCAmelCase: the integer seed.

    The original body referenced an undefined name instead of the parameter.
    """
    random.seed(_lowerCAmelCase)
    np.random.seed(_lowerCAmelCase)
    torch.manual_seed(_lowerCAmelCase)
    torch.cuda.manual_seed_all(_lowerCAmelCase)
    # ^^ safe to call this function even if cuda is not available


class A__:
    """Exponential Moving Average (EMA) of a set of model parameters.

    Maintains ``shadow_params`` updated as ``s = s - (1 - decay) * (s - p)``.
    NOTE(review): the original declared every ``__init__`` parameter as ``A_``
    (a SyntaxError) and every method as ``__UpperCamelCase`` (mutual shadowing);
    names below are reconstructed from the attribute/keyword names the bodies
    actually reference.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        """
        Args:
            parameters: parameters to track (an ``Iterable`` of tensors).
            decay: maximum EMA decay factor.
            min_decay: minimum EMA decay factor.
            update_after_step: number of optimizer steps before EMA starts.
            use_ema_warmup: ramp decay as ``1 - (1 + step/inv_gamma)**-power``.
            inv_gamma / power: warmup schedule hyper-parameters.
            model_cls / model_config: needed for ``save_pretrained``.
        """
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls):
        """Load a model checkpoint from `path` and wrap it in a fresh EMA tracker."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Copy EMA weights into a fresh model instance and save it to `path`."""
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        # shadow tensors go into the model weights, not the config
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average at this step."""
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Update the shadow parameters towards `parameters` by one EMA step."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # gather the (possibly partitioned) parameter before reading it
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    # frozen params are mirrored verbatim
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the current EMA (shadow) values into `parameters` in place."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the shadow parameters to `device` / cast floating tensors to `dtype`."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return a serializable snapshot of the EMA state (checkpoint-friendly)."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily save `parameters` (on CPU) so `restore` can bring them back."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore the parameters saved by the last `store` call, then drop the copy."""
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load EMA state produced by `state_dict`, validating each field."""
        # deepcopy, to be consistent with module API
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
52
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) <= 1: return lst _UpperCAmelCase = 1 while i < len(_SCREAMING_SNAKE_CASE ): if lst[i - 1] <= lst[i]: i += 1 else: _UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: _UpperCAmelCase = 1 return lst if __name__ == "__main__": __A : Dict = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
260
0
import qiskit def lowerCamelCase ( SCREAMING_SNAKE_CASE = 2 ): '''simple docstring''' __UpperCamelCase :Any = qubits # Using Aer's simulator __UpperCamelCase :Optional[Any] = qiskit.Aer.get_backend('''aer_simulator''' ) # Creating a Quantum Circuit acting on the q register __UpperCamelCase :List[Any] = qiskit.QuantumCircuit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , _SCREAMING_SNAKE_CASE ): # Adding CX (CNOT) gate circuit.cx(i - 1 , _SCREAMING_SNAKE_CASE ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(_SCREAMING_SNAKE_CASE ) ) , list(range(_SCREAMING_SNAKE_CASE ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator __UpperCamelCase :Optional[int] = qiskit.execute(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , shots=1_000 ) return job.result().get_counts(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(F'Total count for various states are: {quantum_entanglement(3)}')
43
"""Fine-tune a pretrained audio model for audio classification (HF Trainer example)."""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly crop `wav` to at most `max_length` seconds; shorter clips pass through.

    The original declared all three parameters with the same name (SyntaxError)
    and was unreachable by its caller, which referenced `random_subsample`.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to use for training and eval.

    NOTE(review): every field in the original was named `UpperCamelCase__`, so
    only one survived; field names are reconstructed from how `main()` reads
    them (`data_args.dataset_name`, `data_args.audio_column_name`, ...).
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature-extractor we fine-tune from.

    Defaults follow the upstream example script — TODO confirm against the
    project's intended defaults (the obfuscated original lost them).
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        # `freeze_feature_extractor` is the deprecated spelling of `freeze_feature_encoder`.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )


def main():
    """Entry point: parse args, build datasets/model, then train and/or evaluate."""
    # See all possible arguments by passing --help; supports a single json file too.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms (random subsampling + feature extraction) across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms (full-length feature extraction) across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Compute accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
260
0
import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 lowerCamelCase : str = sys.version_info >= (3, 10) def _SCREAMING_SNAKE_CASE ( lowercase : Tuple=None , lowercase : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class A: '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 UpperCamelCase = 42 @dataclass class A: '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} ) @dataclass class A: '''simple docstring''' UpperCamelCase = False UpperCamelCase = True UpperCamelCase = None class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''titi''' UpperCamelCase = '''toto''' class A( UpperCamelCase ): '''simple docstring''' UpperCamelCase = '''titi''' UpperCamelCase = '''toto''' UpperCamelCase = 42 @dataclass class A: '''simple docstring''' UpperCamelCase = '''toto''' def a__ ( self : Tuple ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = BasicEnum(self.foo ) @dataclass class A: '''simple docstring''' UpperCamelCase = '''toto''' def a__ ( self : List[str] ) -> List[Any]: """simple docstring""" lowerCamelCase_ = MixedTypeEnum(self.foo ) @dataclass class A: '''simple docstring''' UpperCamelCase = None UpperCamelCase = field(default=UpperCamelCase , metadata={'''help''': '''help message'''} ) UpperCamelCase = None UpperCamelCase = list_field(default=[] ) UpperCamelCase = list_field(default=[] ) @dataclass class 
A: '''simple docstring''' UpperCamelCase = list_field(default=[] ) UpperCamelCase = list_field(default=[1, 2, 3] ) UpperCamelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) UpperCamelCase = list_field(default=[0.1, 0.2, 0.3] ) @dataclass class A: '''simple docstring''' UpperCamelCase = field() UpperCamelCase = field() UpperCamelCase = field() def a__ ( self : int ) -> str: """simple docstring""" lowerCamelCase_ = BasicEnum(self.required_enum ) @dataclass class A: '''simple docstring''' UpperCamelCase = 42 UpperCamelCase = field() UpperCamelCase = None UpperCamelCase = field(default='''toto''' , metadata={'''help''': '''help message'''} ) UpperCamelCase = list_field(default=['''Hallo''', '''Bonjour''', '''Hello'''] ) if is_python_no_less_than_3_10: @dataclass class A: '''simple docstring''' UpperCamelCase = False UpperCamelCase = True UpperCamelCase = None @dataclass class A: '''simple docstring''' UpperCamelCase = None UpperCamelCase = field(default=UpperCamelCase , metadata={'''help''': '''help message'''} ) UpperCamelCase = None UpperCamelCase = list_field(default=[] ) UpperCamelCase = list_field(default=[] ) class A( unittest.TestCase ): '''simple docstring''' def a__ ( self : int , A_ : argparse.ArgumentParser , A_ : argparse.ArgumentParser ) -> Dict: """simple docstring""" self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): lowerCamelCase_ = {k: v for k, v in vars(__UpperCamelCase ).items() if k != 'container'} lowerCamelCase_ = {k: v for k, v in vars(__UpperCamelCase ).items() if k != 'container'} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('choices' , __UpperCamelCase ) and yy.get('choices' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['type'](__UpperCamelCase ) , yy['type'](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , 
__UpperCamelCase ) def a__ ( self : int ) -> str: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = argparse.ArgumentParser() expected.add_argument('--foo' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('--bar' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('--baz' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('--flag' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='?' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) lowerCamelCase_ = ['--foo', '1', '--baz', 'quux', '--bar', '0.5'] ((lowerCamelCase_ ) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def a__ ( self : Dict ) -> List[Any]: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = argparse.ArgumentParser() expected.add_argument('--foo' , default=42 , type=__UpperCamelCase ) expected.add_argument('--baz' , default='toto' , type=__UpperCamelCase , help='help message' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def a__ ( self : Tuple ) -> List[str]: """simple docstring""" lowerCamelCase_ = argparse.ArgumentParser() expected.add_argument('--foo' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='?' ) expected.add_argument('--baz' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='?' 
) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('--no_baz' , action='store_false' , default=__UpperCamelCase , dest='baz' ) expected.add_argument('--opt' , type=__UpperCamelCase , default=__UpperCamelCase ) lowerCamelCase_ = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) lowerCamelCase_ = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) lowerCamelCase_ = parser.parse_args(['--foo', '--no_baz'] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) lowerCamelCase_ = parser.parse_args(['--foo', '--baz'] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) lowerCamelCase_ = parser.parse_args(['--foo', 'True', '--baz', 'True', '--opt', 'True'] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) lowerCamelCase_ = parser.parse_args(['--foo', 'False', '--baz', 'False', '--opt', 'False'] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def a__ ( self : Optional[Any] ) -> str: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=['titi', 'toto', 42] , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) lowerCamelCase_ = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) lowerCamelCase_ = 
parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) lowerCamelCase_ = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) lowerCamelCase_ = parser.parse_args_into_dataclasses(['--foo', 'titi'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) lowerCamelCase_ = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) lowerCamelCase_ = parser.parse_args_into_dataclasses(['--foo', '42'] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def a__ ( self : List[str] ) -> List[str]: """simple docstring""" @dataclass class A: '''simple docstring''' UpperCamelCase = '''toto''' lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = argparse.ArgumentParser() expected.add_argument( '--foo' , default='toto' , choices=('titi', 'toto', 42) , type=make_choice_type_function(['titi', 'toto', 42] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) lowerCamelCase_ = parser.parse_args([] ) self.assertEqual(args.foo , 'toto' ) lowerCamelCase_ = parser.parse_args(['--foo', 'titi'] ) self.assertEqual(args.foo , 'titi' ) lowerCamelCase_ = parser.parse_args(['--foo', '42'] ) self.assertEqual(args.foo , 42 ) def a__ ( self : int ) -> int: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = argparse.ArgumentParser() expected.add_argument('--foo_int' , nargs='+' , default=[] , type=__UpperCamelCase ) expected.add_argument('--bar_int' , nargs='+' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__UpperCamelCase ) expected.add_argument('--foo_float' , nargs='+' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) lowerCamelCase_ = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['Hallo', 'Bonjour', 'Hello'] , 
foo_float=[0.1, 0.2, 0.3] ) , ) lowerCamelCase_ = parser.parse_args('--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['a', 'b', 'c'] , foo_float=[0.1, 0.7] ) ) def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = argparse.ArgumentParser() expected.add_argument('--foo' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('--bar' , default=__UpperCamelCase , type=__UpperCamelCase , help='help message' ) expected.add_argument('--baz' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('--ces' , nargs='+' , default=[] , type=__UpperCamelCase ) expected.add_argument('--des' , nargs='+' , default=[] , type=__UpperCamelCase ) lowerCamelCase_ = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) lowerCamelCase_ = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) lowerCamelCase_ = parser.parse_args('--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=12 , bar=3.14 , baz='42' , ces=['a', 'b', 'c'] , des=[1, 2, 3] ) ) def a__ ( self : Any ) -> int: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = argparse.ArgumentParser() expected.add_argument('--required_list' , nargs='+' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('--required_str' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__UpperCamelCase , ) 
self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def a__ ( self : str ) -> List[Any]: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = argparse.ArgumentParser() expected.add_argument('--foo' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '--required_enum' , type=make_choice_type_function(['titi', 'toto'] ) , choices=['titi', 'toto'] , required=__UpperCamelCase , ) expected.add_argument('--opt' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('--baz' , default='toto' , type=__UpperCamelCase , help='help message' ) expected.add_argument('--foo_str' , nargs='+' , default=['Hallo', 'Bonjour', 'Hello'] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } lowerCamelCase_ = parser.parse_dict(__UpperCamelCase )[0] lowerCamelCase_ = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def a__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, 'extra': 42, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ = os.path.join(__UpperCamelCase , 'temp_json' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '.json' , 'w+' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) lowerCamelCase_ = parser.parse_yaml_file(Path(temp_local_path + 
'.json' ) )[0] lowerCamelCase_ = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def a__ ( self : Union[str, Any] ) -> Any: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) lowerCamelCase_ = { 'foo': 12, 'bar': 3.14, 'baz': '42', 'flag': True, } with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase_ = os.path.join(__UpperCamelCase , 'temp_yaml' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '.yaml' , 'w+' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) lowerCamelCase_ = parser.parse_yaml_file(Path(temp_local_path + '.yaml' ) )[0] lowerCamelCase_ = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def a__ ( self : int ) -> List[str]: """simple docstring""" lowerCamelCase_ = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
204
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in 
range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : 
Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) 
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: 
self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
0
"""simple docstring""" import math import tensorflow as tf from packaging import version def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ = tf.cast(math.pi , x.dtype ) SCREAMING_SNAKE_CASE__ = tf.cast(0.04_47_15 , x.dtype ) SCREAMING_SNAKE_CASE__ = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(_SCREAMING_SNAKE_CASE , 3 )) )) return x * cdf def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) return x * tf.tanh(tf.math.softplus(_SCREAMING_SNAKE_CASE ) ) def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ = tf.cast(0.04_47_15 , x.dtype ) SCREAMING_SNAKE_CASE__ = tf.cast(0.79_78_84_56_08 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(_SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE__ = tf.cast(1.7_02 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def A ( snake_case__ ): '''simple docstring''' return tf.clip_by_value(_gelu(_SCREAMING_SNAKE_CASE ) , -10 , 10 ) def A ( snake_case__ , snake_case__=-1 ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = tf.split(_SCREAMING_SNAKE_CASE , 2 , axis=_SCREAMING_SNAKE_CASE ) return a * tf.math.sigmoid(_SCREAMING_SNAKE_CASE ) if version.parse(tf.version.VERSION) >= version.parse("2.4"): def A ( snake_case__ ): '''simple docstring''' return tf.keras.activations.gelu(_SCREAMING_SNAKE_CASE , approximate=_SCREAMING_SNAKE_CASE ) A_ : Any = tf.keras.activations.gelu A_ : int = approximate_gelu_wrap else: A_ : 
Union[str, Any] = _gelu A_ : Any = _gelu_new A_ : List[Any] = { "gelu": gelu, "gelu_10": gelu_aa, "gelu_fast": gelu_fast, "gelu_new": gelu_new, "glu": glu, "mish": mish, "quick_gelu": quick_gelu, "relu": tf.keras.activations.relu, "sigmoid": tf.keras.activations.sigmoid, "silu": tf.keras.activations.swish, "swish": tf.keras.activations.swish, "tanh": tf.keras.activations.tanh, } def A ( snake_case__ ): '''simple docstring''' if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(f"""function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}""" )
165
"""simple docstring""" from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : float )->float: return 0.0 def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = 20 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _UpperCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) 
plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
260
0
"""Degree-4 polynomial regression demo on the Position Salaries dataset."""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values  # feature: position level
y = dataset.iloc[:, 2].values  # target: salary

# The split is kept for parity with the original script; only X / y are used below.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial() -> None:
    """Scatter the raw data and overlay the fitted degree-4 polynomial curve."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


# Backward-compatible alias for the previous (obfuscated) function name.
SCREAMING_SNAKE_CASE_ = viz_polymonial

if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
338
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """camembert""" def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=3_0_5_2_2 , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=None , **__UpperCamelCase : Optional[Any] , )->str: super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = 
position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class _a ( lowerCAmelCase): """simple docstring""" @property def lowercase__ ( self : int )->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
260
0
"""Logging utilities: a thin wrapper around stdlib `logging` plus a tqdm shim."""
import logging
import os
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

from tqdm import auto as tqdm_lib


log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """Return the default level, honoring the DATASETS_VERBOSITY env variable."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys() ) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    """Top-level package name this module lives in."""
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the given name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Current effective level of the library root logger."""
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the level of the library root logger."""
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    # NOTE(review): the obfuscated original only assigned a throwaway local
    # (False) here; restored to toggling propagation on the root logger —
    # confirm against the upstream module.
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm that does nothing (used when progress bars are disabled)."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, attr):
        """Return an empty no-op function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    """Callable that yields a real tqdm bar or an EmptyTqdm, per the global flag."""

    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
34
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[str] = { "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """poolformer""" def __init__( self : List[str] , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : str=1_6 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : int=4.0 , __UpperCamelCase : str=[2, 2, 6, 2] , __UpperCamelCase : Tuple=[6_4, 1_2_8, 3_2_0, 5_1_2] , __UpperCamelCase : int=[7, 3, 3, 3] , __UpperCamelCase : str=[4, 2, 2, 2] , __UpperCamelCase : Union[str, Any]=[2, 1, 1, 1] , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : str=0.0_2 , **__UpperCamelCase : List[Any] , )->Dict: _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = stride _UpperCAmelCase = padding _UpperCAmelCase = pool_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = mlp_ratio _UpperCAmelCase = depths _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_layer_scale _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = initializer_range super().__init__(**__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = version.parse("""1.11""") @property def lowercase__ ( self : Union[str, Any] )->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: 
'''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase__ ( self : Tuple )->float: return 2e-3
260
0
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( UpperCAmelCase_ : tuple[int, int] , UpperCAmelCase_ : int ) -> List[str]: '''simple docstring''' __snake_case , __snake_case : Any = position __snake_case : List[Any] = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] __snake_case : Optional[int] = [] for position in positions: __snake_case , __snake_case : List[str] = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(_SCREAMING_SNAKE_CASE ) return permissible_positions def __UpperCAmelCase ( UpperCAmelCase_ : list[list[int]] ) -> int: '''simple docstring''' return not any(elem == 0 for row in board for elem in row ) def __UpperCAmelCase ( UpperCAmelCase_ : list[list[int]] , UpperCAmelCase_ : tuple[int, int] , UpperCAmelCase_ : int ) -> Union[str, Any]: '''simple docstring''' if is_complete(_SCREAMING_SNAKE_CASE ): return True for position in get_valid_pos(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) ): __snake_case , __snake_case : Optional[int] = position if board[y][x] == 0: __snake_case : Dict = curr + 1 if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , curr + 1 ): return True __snake_case : Any = 0 return False def __UpperCAmelCase ( UpperCAmelCase_ : int ) -> Optional[Any]: '''simple docstring''' __snake_case : int = [[0 for i in range(_SCREAMING_SNAKE_CASE )] for j in range(_SCREAMING_SNAKE_CASE )] for i in range(_SCREAMING_SNAKE_CASE ): for j in range(_SCREAMING_SNAKE_CASE ): __snake_case : int = 1 if open_knight_tour_helper(_SCREAMING_SNAKE_CASE , (i, j) , 1 ): return board __snake_case : Optional[Any] = 0 __snake_case : List[str] = F"Open Kight Tour cannot be performed on a board of size {n}" raise ValueError(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
172
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : Union[str, Any] = 16 __A : Optional[Any] = 32 def lowercase ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ): '''simple docstring''' _UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_SCREAMING_SNAKE_CASE : Optional[int] ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # 
starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_SCREAMING_SNAKE_CASE : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( _SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : Optional[int] = mocked_dataloaders # noqa: F811 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _SCREAMING_SNAKE_CASE ) == "1": _UpperCAmelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config['''lr'''] _UpperCAmelCase = int(config['''num_epochs'''] ) _UpperCAmelCase = int(config['''seed'''] ) _UpperCAmelCase = int(config['''batch_size'''] ) set_seed(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase = MAX_GPU_BATCH_SIZE # 
Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _UpperCAmelCase = os.path.split(_SCREAMING_SNAKE_CASE )[-1].split('''.''' )[0] accelerator.init_trackers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _UpperCAmelCase = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , _SCREAMING_SNAKE_CASE ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(_SCREAMING_SNAKE_CASE ), '''epoch''': epoch, } , step=_SCREAMING_SNAKE_CASE , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. 
Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=_SCREAMING_SNAKE_CASE , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
"""Processor class for CLIP: bundles a CLIP image processor and tokenizer."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIPImageProcessor and a CLIPTokenizer(Fast) behind one interface."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as a fallback for
        # `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; return a BatchEncoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Previously the pixel values were dropped into a throwaway local;
            # attach them to the text encoding so the caller actually gets them.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Merge the two components' input names, preserving order, no dupes.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor


# Backward-compatible alias for the previous (obfuscated) class name.
__UpperCAmelCase = CLIPProcessor
258
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
260
0
"""Receive a file from a server over a plain TCP socket and save it locally."""
import socket


def main() -> None:
    """Connect to <hostname>:12312, greet the server, and write the incoming
    bytes to a file named ``Received_file``."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:  # empty recv => server closed the connection
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


# Backward-compatible alias for the previous (obfuscated) name; the
# `__main__` guard previously called `main()`, which did not exist.
_UpperCAmelCase = main

if __name__ == "__main__":
    main()
82
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , 
'''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, 
args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[str] = { "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : Optional[int] = "poolformer" def __init__( self : List[str] , A : int=3 , A : List[Any]=16 , A : str=16 , A : List[Any]=3 , A : int=4.0 , A : str=[2, 2, 6, 2] , A : Tuple=[64, 1_28, 3_20, 5_12] , A : int=[7, 3, 3, 3] , A : str=[4, 2, 2, 2] , A : Union[str, Any]=[2, 1, 1, 1] , A : List[str]=4 , A : List[str]=0.0 , A : Any="gelu" , A : List[str]=True , A : Union[str, Any]=1e-5 , A : str=0.02 , **A : List[Any] , ) -> Dict: lowercase_ : List[str] = num_channels lowercase_ : List[Any] = patch_size lowercase_ : Tuple = stride lowercase_ : Tuple = padding lowercase_ : List[str] = pool_size lowercase_ : List[str] = hidden_sizes lowercase_ : str = mlp_ratio lowercase_ : Optional[Any] = depths lowercase_ : Optional[int] = patch_sizes lowercase_ : str = strides lowercase_ : Union[str, Any] = num_encoder_blocks lowercase_ : Optional[Any] = drop_path_rate lowercase_ : int = hidden_act lowercase_ : List[str] = use_layer_scale lowercase_ : Dict = layer_scale_init_value lowercase_ : List[str] = initializer_range super().__init__(**__UpperCamelCase ) class _UpperCAmelCase ( _A ): SCREAMING_SNAKE_CASE_ : Tuple = version.parse("1.11" ) @property def A ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def A ( self : Tuple ) -> float: return 2e-3
33
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( _SCREAMING_SNAKE_CASE : Features ): '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(_SCREAMING_SNAKE_CASE : FeatureType ) -> None: nonlocal batch_size if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : int , )->Union[str, Any]: super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) 
_UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCAmelCase = Parquet( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , hash=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Union[str, Any] )->Dict: # Build iterable dataset if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Dataset , __UpperCamelCase : Union[PathLike, BinaryIO] , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Tuple , )->Optional[int]: _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: _UpperCAmelCase = self._write(file_obj=__UpperCamelCase , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : int , __UpperCamelCase : BinaryIO , __UpperCamelCase : int , **__UpperCamelCase 
: int )->int: _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCamelCase ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase , **__UpperCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCamelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(__UpperCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCamelCase ) written += batch.nbytes writer.close() return written
260
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowercase__ = logging.get_logger(__name__) if is_vision_available(): import PIL class __snake_case ( __lowerCAmelCase ): a__ = ["""pixel_values"""] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 2_55 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None: '''simple docstring''' super().__init__(**__UpperCamelCase) a__: List[str] = size if size is not None else {'shortest_edge': 2_24} a__: Any = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase) a__: Dict = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} a__: Union[str, Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name='crop_size') a__: int = do_resize a__: Union[str, Any] = size a__: List[str] = resample a__: Optional[Any] = do_center_crop a__: List[str] = crop_size a__: Any = do_rescale a__: str = rescale_factor a__: Tuple = do_normalize a__: Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN a__: Dict = image_std if image_std is not None else OPENAI_CLIP_STD a__: Tuple = do_convert_rgb def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' a__: Dict = get_size_dict(__UpperCamelCase , 
default_to_square=__UpperCamelCase) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}') a__: Dict = get_resize_output_image_size(__UpperCamelCase , size=size['shortest_edge'] , default_to_square=__UpperCamelCase) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' a__: str = get_size_dict(__UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}') return center_crop(__UpperCamelCase , size=(size['height'], size['width']) , data_format=__UpperCamelCase , **__UpperCamelCase) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Optional[Any]: '''simple docstring''' return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase) def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image: '''simple docstring''' a__: Tuple = do_resize if do_resize is not None else self.do_resize a__: Any = size if size is not None else self.size a__: Optional[Any] = get_size_dict(__UpperCamelCase , param_name='size' , default_to_square=__UpperCamelCase) a__: Optional[int] = 
resample if resample is not None else self.resample a__: List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop a__: List[str] = crop_size if crop_size is not None else self.crop_size a__: Union[str, Any] = get_size_dict(__UpperCamelCase , param_name='crop_size' , default_to_square=__UpperCamelCase) a__: List[Any] = do_rescale if do_rescale is not None else self.do_rescale a__: List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor a__: Any = do_normalize if do_normalize is not None else self.do_normalize a__: Optional[Any] = image_mean if image_mean is not None else self.image_mean a__: Optional[int] = image_std if image_std is not None else self.image_std a__: List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb a__: List[Any] = make_list_of_images(__UpperCamelCase) if not valid_images(__UpperCamelCase): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # PIL RGBA images are converted to RGB if do_convert_rgb: a__: Optional[Any] = [convert_to_rgb(__UpperCamelCase) for image in images] # All transformations expect numpy arrays. 
a__: Dict = [to_numpy_array(__UpperCamelCase) for image in images] if do_resize: a__: Tuple = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase) for image in images] if do_center_crop: a__: int = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase) for image in images] if do_rescale: a__: str = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase) for image in images] if do_normalize: a__: List[Any] = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase) for image in images] a__: Union[str, Any] = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase) for image in images] a__: Tuple = {'pixel_values': images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase)
290
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = " " ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 0 for index, char in enumerate(_SCREAMING_SNAKE_CASE ): if char == separator: split_words.append(string[last_index:index] ) _UpperCAmelCase = index + 1 elif index + 1 == len(_SCREAMING_SNAKE_CASE ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
260
0
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
Text data.
Second line of data."""
FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    # A zstd-compressed copy of FILE_CONTENT, shared by the whole session.
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    # FILE_CONTENT written inside the mock "tmp://" filesystem.
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


# NOTE: the obfuscated source named every test `A_`, so pytest collected none of
# them and later defs shadowed earlier ones; restored `test_*` names.
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    # NOTE(review): relative-path case reconstructed from upstream; confirm it
    # resolves `__file__` relative to the working directory.
    text_file = str(Path(__file__).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
52
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = args.pruning_method _UpperCAmelCase = args.threshold _UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) _UpperCAmelCase = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) _UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) _UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "bias" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": _UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1 _UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE ) 
_UpperCAmelCase = s * (r - l) + l _UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: _UpperCAmelCase = os.path.join( os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'\nCreated folder {target_model_path}' ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A : Optional[int] = parser.parse_args() main(args)
260
0
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union

import flax
import jax.numpy as jnp

from ..utils import BaseOutput


SCHEDULER_CONFIG_NAME = "scheduler_config.json"


# NOTE(review): member names reconstructed from upstream diffusers; confirm.
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output for a scheduler's `step` function: the denoised sample."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin providing save/load and compatibility helpers for flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate a scheduler (and its state) from a saved configuration."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        # NOTE(review): `state` is only bound when the scheduler exposes
        # create_state/has_state — mirrors upstream behavior.
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Save the scheduler configuration to ``save_directory``."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Schedulers whose configs are interchangeable with this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """Broadcast ``x`` to ``shape`` by appending trailing singleton axes."""
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    """Create the "squaredcos_cap_v2" (Glide cosine) beta schedule,
    capping each beta at ``max_beta``."""

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Build the beta/alpha arrays from a scheduler's config."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Return sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) broadcast to sample shape."""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Forward diffusion: x_t = sqrt(a_bar) x_0 + sqrt(1 - a_bar) eps."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    """v-prediction target: v = sqrt(a_bar) eps - sqrt(1 - a_bar) x."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
43
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
0
import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests lowerCamelCase : Tuple = open # noqa: we just need to have a builtin inside this module to test it properly
204
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase = np.zeros_like(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image _UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): _UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() _UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / "image_data" / "lena.jpg" __A : str = np.array(Image.open(lena_path)) # kernel to be applied __A : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Optional[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
260
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase (metaclass=A__ ): lowerCamelCase__ : List[Any] = ['speech'] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any ) -> Union[str, Any]: requires_backends(self , ["""speech"""] ) class lowerCamelCase (metaclass=A__ ): lowerCamelCase__ : List[str] = ['speech'] def __init__( self : str , *__UpperCAmelCase : Optional[int] , **__UpperCAmelCase : List[Any] ) -> List[Any]: requires_backends(self , ["""speech"""] )
165
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """audio-spectrogram-transformer""" def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = patch_size _UpperCAmelCase = qkv_bias _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins
260
0
def SCREAMING_SNAKE_CASE_ ( snake_case__ = 5_0 ) -> Tuple: lowerCAmelCase = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f'{solution() = }')
338
"""simple docstring""" def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCAmelCase = 6 _UpperCAmelCase = 1 _UpperCAmelCase = 1901 _UpperCAmelCase = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCAmelCase = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCAmelCase = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
260
0
'''simple docstring''' import random def snake_case_ (_a : str , _a : Optional[int] , _a : Union[str, Any] ): UpperCAmelCase = a[left_index] UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ): if a[j] < pivot: UpperCAmelCase , UpperCAmelCase = a[i], a[j] i += 1 UpperCAmelCase , UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def snake_case_ (_a : Any , _a : Optional[Any] , _a : Optional[Any] ): if left < right: UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 ) UpperCAmelCase , UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) quick_sort_random( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point quick_sort_random( _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point def snake_case_ (): UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip() UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )] quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
34
"""simple docstring""" from __future__ import annotations import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [n] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if len(str(_SCREAMING_SNAKE_CASE ) ) > 3: if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ): return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(_SCREAMING_SNAKE_CASE ) != count: if validate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE ) if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ): list_truncated_primes.append(_SCREAMING_SNAKE_CASE ) num += 2 return list_truncated_primes def lowercase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(11)) = }''')
260
0
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.g4dn.xlarge""", """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9}, }, { """framework""": """tensorflow""", """script""": """run_tf.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.g4dn.xlarge""", """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9}, }, ] ) class UpperCamelCase ( unittest.TestCase ): def _lowercase (self : List[str]) -> Union[str, Any]: if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=__UpperCamelCase , ) assert hasattr(self , 'env') def _lowercase (self : Optional[Any] , _A : Tuple=1) -> List[str]: # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-single" , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , ) def _lowercase (self 
: List[str] , _A : Optional[int]) -> List[Any]: TrainingJobAnalytics(__UpperCamelCase).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") def _lowercase (self : str) -> int: # create estimator __snake_case : List[str] = self.create_estimator() # run training estimator.fit() # result dataframe __snake_case : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis __snake_case : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value']) __snake_case : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value']) # get train time from SageMaker job, this includes starting, preprocessing, stopping __snake_case : Any = ( Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' , 99_99_99) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy) assert all(t <= self.results['eval_loss'] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json" , 'w') as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , __UpperCamelCase)
172
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = 
list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , 
yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) 
_UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', 
'''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) 
self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: 
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] 
_UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
0
'''simple docstring''' from __future__ import annotations _lowerCamelCase : Union[str, Any] = 10 def __a ( UpperCAmelCase ) ->Any: """simple docstring""" A = 1 A = max(_SCREAMING_SNAKE_CASE ) while placement <= max_digit: # declare and initialize empty buckets A = [[] for _ in range(_SCREAMING_SNAKE_CASE )] # split list_of_ints between the buckets for i in list_of_ints: A = int((i / placement) % RADIX ) buckets[tmp].append(_SCREAMING_SNAKE_CASE ) # put each buckets' contents into list_of_ints A = 0 for b in range(_SCREAMING_SNAKE_CASE ): for i in buckets[b]: A = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
258
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _UpperCAmelCase = True for i in range(_SCREAMING_SNAKE_CASE ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _UpperCAmelCase = True if a[i].islower(): _UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
260
0
import pytest import datasets # Import fixture modules as plugins A__ = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" for item in items: if any(marker in item.keywords for marker in ["""integration""", """unit"""] ): continue item.add_marker(pytest.mark.unit ) def _UpperCAmelCase ( snake_case ): """simple docstring""" config.addinivalue_line("""markers""" , """torchaudio_latest: mark test to run with torchaudio>=0.12""" ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( snake_case , snake_case ): """simple docstring""" _lowerCAmelCase = tmp_path_factory.getbasetemp() / """cache""" _lowerCAmelCase = test_hf_cache_home / """datasets""" _lowerCAmelCase = test_hf_cache_home / """metrics""" _lowerCAmelCase = test_hf_cache_home / """modules""" monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""" , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""" , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""" , str(_SCREAMING_SNAKE_CASE ) ) _lowerCAmelCase = test_hf_datasets_cache / """downloads""" monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) ) _lowerCAmelCase = test_hf_datasets_cache / """downloads""" / """extracted""" monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope="""session""" ) def _UpperCAmelCase ( ): """simple docstring""" datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( snake_case ): """simple docstring""" monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""" , _SCREAMING_SNAKE_CASE ) @pytest.fixture def _UpperCAmelCase ( snake_case ): """simple docstring""" monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""" , _SCREAMING_SNAKE_CASE 
)
82
"""simple docstring""" import random def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = a[left_index] _UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' if left < right: _UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 ) _UpperCAmelCase , _UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) quick_sort_random( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point quick_sort_random( _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point def lowercase ( ): '''simple docstring''' _UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip() _UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )] quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
"""simple docstring""" import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration __A : Any = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def lowercase ( __snake_case : Union[str, Any] ): lowercase_ : Tuple = ['''layers''', '''blocks'''] for k in ignore_keys: state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __A : List[Any] = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", 
".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def lowercase ( __snake_case : Optional[Any] ): lowercase_ : Dict = list(s_dict.keys() ) for key in keys: lowercase_ : Tuple = key for k, v in WHISPER_MAPPING.items(): if k in key: lowercase_ : List[Any] = new_key.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(F'''{key} -> {new_key}''' ) lowercase_ : List[Any] = s_dict.pop(_SCREAMING_SNAKE_CASE ) return s_dict def lowercase ( __snake_case : Union[str, Any] ): lowercase_ , lowercase_ : List[str] = emb.weight.shape lowercase_ : Optional[int] = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = emb.weight.data return lin_layer def lowercase ( __snake_case : str , __snake_case : str ): os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) lowercase_ : int = os.path.basename(_SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = url.split('''/''' )[-2] lowercase_ : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ) and not os.path.isfile(_SCREAMING_SNAKE_CASE ): raise RuntimeError(F'''{download_target} exists and is not a regular file''' ) if os.path.isfile(_SCREAMING_SNAKE_CASE ): lowercase_ : Dict = open(_SCREAMING_SNAKE_CASE , '''rb''' ).read() if hashlib.shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() == expected_shaaaa: return model_bytes else: 
warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' ) with urllib.request.urlopen(_SCREAMING_SNAKE_CASE ) as source, open(_SCREAMING_SNAKE_CASE , '''wb''' ) as output: with tqdm( total=int(source.info().get('''Content-Length''' ) ) , ncols=8_0 , unit='''iB''' , unit_scale=_SCREAMING_SNAKE_CASE , unit_divisor=1_0_2_4 ) as loop: while True: lowercase_ : int = source.read(8_1_9_2 ) if not buffer: break output.write(_SCREAMING_SNAKE_CASE ) loop.update(len(_SCREAMING_SNAKE_CASE ) ) lowercase_ : Optional[Any] = open(_SCREAMING_SNAKE_CASE , '''rb''' ).read() if hashlib.shaaaa(_SCREAMING_SNAKE_CASE ).hexdigest() != expected_shaaaa: raise RuntimeError( '''Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.''' ) return model_bytes def lowercase ( __snake_case : List[Any] , __snake_case : List[Any] ): if ".pt" not in checkpoint_path: lowercase_ : Dict = _download(_MODELS[checkpoint_path] ) else: lowercase_ : Dict = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) lowercase_ : Union[str, Any] = original_checkpoint['''dims'''] lowercase_ : Optional[int] = original_checkpoint['''model_state_dict'''] lowercase_ : Tuple = state_dict['''decoder.token_embedding.weight'''] remove_ignore_keys_(_SCREAMING_SNAKE_CASE ) rename_keys(_SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = True lowercase_ : Optional[int] = state_dict['''decoder.layers.0.fc1.weight'''].shape[0] lowercase_ : Dict = WhisperConfig( vocab_size=dimensions['''n_vocab'''] , encoder_ffn_dim=_SCREAMING_SNAKE_CASE , decoder_ffn_dim=_SCREAMING_SNAKE_CASE , num_mel_bins=dimensions['''n_mels'''] , d_model=dimensions['''n_audio_state'''] , max_target_positions=dimensions['''n_text_ctx'''] , encoder_layers=dimensions['''n_audio_layer'''] , encoder_attention_heads=dimensions['''n_audio_head'''] , decoder_layers=dimensions['''n_text_layer'''] , decoder_attention_heads=dimensions['''n_text_state'''] , 
max_source_positions=dimensions['''n_audio_ctx'''] , ) lowercase_ : Optional[int] = WhisperForConditionalGeneration(_SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : List[Any] = model.model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0 and not set(_SCREAMING_SNAKE_CASE ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' F''' but all the following weights are missing {missing}''' ) if tie_embeds: lowercase_ : Any = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowercase_ : str = proj_out_weights model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Any = argparse.ArgumentParser() # # Required parameters parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''') parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') __A : Any = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
33
"""Perplexity metric for `datasets`.

Reconstructed from the obfuscated original, in which every assignment target
had been replaced by the same throwaway name: the doc constants were all bound
to `__A` (while the class decorator references `_CITATION`/`_DESCRIPTION`/
`_KWARGS_DESCRIPTION`), the metric methods were named `lowercase__` (so the
`datasets.Metric` base class could never dispatch to `_info`/`_compute`), and
intermediates such as `existing_special_tokens`, `max_tokenized_len`, `ppls`
and `labels` were referenced but never bound. Also fixes the typos
`torch.intaa` -> `torch.int64` and `torch.expa` -> `torch.exp`, and the
device branch that assigned `'cuda'` on both paths.
"""

import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer

import datasets
from datasets import logging


_CITATION = """\
"""

_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.

For more information, see https://huggingface.co/docs/transformers/perplexity
"""

_KWARGS_DESCRIPTION = """
Args:
    model_id (str): model used for calculating Perplexity.
        NOTE: Perplexity can only be calculated for causal language models
        (the full list can be found in the AutoModelForCausalLM documentation:
        https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
    input_texts (list of str): input text, each separate text snippet is one list entry.
    batch_size (int): the batch size to run texts through the model. Defaults to 16.
    add_start_token (bool): whether to add the start token to the texts, so the
        perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on, defaults to 'cuda' when available.
Returns:
    perplexity: dictionary containing the perplexity scores for the texts in the
        input list, as well as the mean perplexity. If one of the input texts is
        longer than the max input length of the model, it is truncated to the max
        length for the perplexity computation.
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _a(datasets.Metric):
    """Perplexity of a causal language model over a list of input texts."""

    def _info(self):
        # Metric metadata: single string feature per example.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        """Compute per-text and mean perplexity with the given causal LM.

        Raises AssertionError on invalid device names, on models without the
        special tokens the chosen options require, and on too-short inputs.
        """
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                # Prepend one BOS token per row and extend the mask accordingly.
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            # Shift so that token t predicts token t+1; mask out padding positions.
            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            # exp of the masked mean negative log-likelihood per sequence.
            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
260
0
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def __a ( _SCREAMING_SNAKE_CASE ) ->Optional[Any]: if isinstance(_SCREAMING_SNAKE_CASE , collections.abc.Iterable ): return x return (x, x) @require_flax class __snake_case : def lowerCamelCase_ ( self , lowercase , lowercase) -> List[Any]: '''simple docstring''' pass def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' pass def lowerCamelCase_ ( self) -> str: '''simple docstring''' pass def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' a__: Dict = np.abs((a - b)).max() self.assertLessEqual(__UpperCamelCase , __UpperCamelCase , f'Difference between torch and flax is {diff} (>= {tol}).') def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[Any]: '''simple docstring''' a__: Dict = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase) a__: Optional[int] = 
FlaxVisionTextDualEncoderModel(__UpperCamelCase) a__: Union[str, Any] = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim)) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[Any]: '''simple docstring''' a__ , a__: Any = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase) a__: Optional[int] = {'vision_model': vision_model, 'text_model': text_model} a__: Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase) a__: List[Any] = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim)) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> str: '''simple docstring''' a__ , a__: Union[str, Any] = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase) a__: Optional[int] = {'vision_model': vision_model, 'text_model': text_model} a__: Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase) a__: Tuple = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase) a__: Optional[int] = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCamelCase) a__: Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase) a__: Any = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase) a__: List[str] = after_output[0] a__: List[str] = np.amax(np.abs(out_a - 
out_a)) self.assertLessEqual(__UpperCamelCase , 1e-3) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Dict: '''simple docstring''' a__ , a__: int = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase) a__: List[str] = {'vision_model': vision_model, 'text_model': text_model} a__: Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase) a__: Any = model( input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , output_attentions=__UpperCamelCase) a__: Union[str, Any] = output.vision_model_output.attentions self.assertEqual(len(__UpperCamelCase) , vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) a__: int = to_atuple(vision_model.config.image_size) a__: Optional[Any] = to_atuple(vision_model.config.patch_size) a__: Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) a__: str = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len)) a__: List[str] = output.text_model_output.attentions self.assertEqual(len(__UpperCamelCase) , text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' pt_model.to(__UpperCamelCase) pt_model.eval() # prepare inputs a__: Optional[int] = inputs_dict a__: Dict = {k: torch.tensor(v.tolist()) for k, v in flax_inputs.items()} with torch.no_grad(): a__: int = pt_model(**__UpperCamelCase).to_tuple() a__: Optional[int] = fx_model(**__UpperCamelCase).to_tuple() self.assertEqual(len(__UpperCamelCase) , len(__UpperCamelCase) , 'Output lengths differ between Flax and PyTorch') for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4]): 
self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__UpperCamelCase) a__: str = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase) a__: Optional[Any] = fx_model_loaded(**__UpperCamelCase).to_tuple() self.assertEqual(len(__UpperCamelCase) , len(__UpperCamelCase) , 'Output lengths differ between Flax and PyTorch') for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4]): self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__UpperCamelCase) a__: Optional[Any] = VisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_flax=__UpperCamelCase) pt_model_loaded.to(__UpperCamelCase) pt_model_loaded.eval() with torch.no_grad(): a__: List[str] = pt_model_loaded(**__UpperCamelCase).to_tuple() self.assertEqual(len(__UpperCamelCase) , len(__UpperCamelCase) , 'Output lengths differ between Flax and PyTorch') for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4]): self.assert_almost_equals(__UpperCamelCase , pt_output_loaded.numpy() , 4e-2) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> List[str]: '''simple docstring''' a__: Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase) a__: str = VisionTextDualEncoderModel(__UpperCamelCase) a__: Any = FlaxVisionTextDualEncoderModel(__UpperCamelCase) a__: Optional[Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCamelCase) a__: Optional[Any] = fx_state self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> List[str]: '''simple docstring''' a__: int = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase) 
a__: Tuple = VisionTextDualEncoderModel(__UpperCamelCase) a__: Optional[int] = FlaxVisionTextDualEncoderModel(__UpperCamelCase) a__: Optional[Any] = load_flax_weights_in_pytorch_model(__UpperCamelCase , fx_model.params) self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) def lowerCamelCase_ ( self) -> Any: '''simple docstring''' a__: int = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**__UpperCamelCase) def lowerCamelCase_ ( self) -> Any: '''simple docstring''' a__: Dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__UpperCamelCase) def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' a__: str = self.prepare_config_and_inputs() self.check_save_load(**__UpperCamelCase) def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' a__: Dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__UpperCamelCase) @is_pt_flax_cross_test def lowerCamelCase_ ( self) -> List[str]: '''simple docstring''' a__: int = self.prepare_config_and_inputs() a__: List[Any] = config_inputs_dict.pop('vision_config') a__: Any = config_inputs_dict.pop('text_config') a__: Tuple = config_inputs_dict self.check_equivalence_pt_to_flax(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) self.check_equivalence_flax_to_pt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) @slow def lowerCamelCase_ ( self) -> str: '''simple docstring''' a__ , a__: List[Any] = self.get_pretrained_model_and_inputs() a__: List[Any] = model_a(**__UpperCamelCase) a__: str = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__UpperCamelCase) a__: List[str] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase) a__: Union[str, Any] = model_a(**__UpperCamelCase) a__: Tuple = after_outputs[0] a__: str = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(__UpperCamelCase , 1e-5) @require_flax class __snake_case ( 
__lowerCAmelCase , unittest.TestCase ): def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' a__: List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , ) a__: Optional[Any] = 13 a__: Union[str, Any] = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ]) a__: Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size) a__: Union[str, Any] = random_attention_mask([batch_size, 4]) a__: Tuple = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def lowerCamelCase_ ( self , lowercase , lowercase) -> Optional[Any]: '''simple docstring''' a__: Optional[int] = FlaxViTModel(__UpperCamelCase) a__: Any = FlaxBertModel(__UpperCamelCase) return vision_model, text_model def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' a__: List[Any] = FlaxViTModelTester(self) a__: List[str] = FlaxBertModelTester(self) a__: List[str] = vit_model_tester.prepare_config_and_inputs() a__: Dict = bert_model_tester.prepare_config_and_inputs() a__ , a__: Union[str, Any] = vision_config_and_inputs a__ , a__ , a__ , a__: Dict = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class __snake_case ( __lowerCAmelCase , unittest.TestCase ): def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' a__: List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , ) a__: 
Any = 13 a__: Tuple = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ]) a__: int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size) a__: Any = random_attention_mask([batch_size, 4]) a__: int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def lowerCamelCase_ ( self , lowercase , lowercase) -> str: '''simple docstring''' a__: int = FlaxCLIPVisionModel(__UpperCamelCase) a__: Optional[int] = FlaxBertModel(__UpperCamelCase) return vision_model, text_model def lowerCamelCase_ ( self) -> List[str]: '''simple docstring''' a__: Union[str, Any] = FlaxCLIPVisionModelTester(self) a__: int = FlaxBertModelTester(self) a__: Optional[int] = clip_model_tester.prepare_config_and_inputs() a__: List[Any] = bert_model_tester.prepare_config_and_inputs() a__ , a__: Optional[Any] = vision_config_and_inputs a__ , a__ , a__ , a__: Any = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class __snake_case ( unittest.TestCase ): @slow def lowerCamelCase_ ( self) -> List[str]: '''simple docstring''' a__: List[str] = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0) a__: str = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian') a__: Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') a__: List[Any] = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=__UpperCamelCase , padding=__UpperCamelCase , return_tensors='np') a__: Union[str, Any] = model(**__UpperCamelCase) # verify the logits self.assertEqual(outputs.logits_per_image.shape , 
(inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) a__: Dict = np.array([[1.2284727, 0.3104122]]) self.assertTrue(np.allclose(outputs.logits_per_image , __UpperCamelCase , atol=1e-3))
290
"""Shared pytest configuration for the `datasets` test suite.

Reconstructed from the obfuscated original, where the plugin list was bound to
a throwaway name (`__A`) instead of the magic `pytest_plugins` variable, every
hook and fixture was named `lowercase` (so later definitions shadowed earlier
ones and pytest could never discover the hooks), and decorator arguments
referenced the undefined module-level name `_SCREAMING_SNAKE_CASE` instead of
literal booleans.
"""

import pytest

import datasets


# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests carrying neither an "integration" nor a "unit" marker as unit tests.
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # Redirect every datasets cache location into a per-session temp dir so
    # tests never touch (or depend on) the user's real HF cache.
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # Don't ping the Hub's download counter from CI.
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Silence SQLAlchemy 2.0 migration warning noise in tests that opt in.
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
260
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = tempfile.mkdtemp() # fmt: off UpperCamelCase : Optional[Any] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on UpperCamelCase : Any = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) UpperCamelCase : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] UpperCamelCase : int = {"unk_token": "<unk>"} UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(__UpperCamelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(__UpperCamelCase ) ) UpperCamelCase : Optional[int] = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], "image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], } UpperCamelCase : Any = os.path.join(self.tmpdirname , __UpperCamelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(__UpperCamelCase , __UpperCamelCase ) def __UpperCamelCase( self , **A_ ): '''simple 
docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCamelCase ) def __UpperCamelCase( self , **A_ ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__UpperCamelCase ) def __UpperCamelCase( self , **A_ ): '''simple docstring''' return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def __UpperCamelCase( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCamelCase : int = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.get_tokenizer() UpperCamelCase : Optional[int] = self.get_rust_tokenizer() UpperCamelCase : str = self.get_image_processor() UpperCamelCase : Any = OwlViTProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) UpperCamelCase : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCamelCase ) UpperCamelCase : Any = OwlViTProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) UpperCamelCase : Dict = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer , __UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) 
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor , __UpperCamelCase ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCamelCase : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCamelCase : List[Any] = self.get_image_processor(do_normalize=__UpperCamelCase ) UpperCamelCase : str = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCamelCase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCamelCase ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.get_image_processor() UpperCamelCase : List[str] = self.get_tokenizer() UpperCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) UpperCamelCase : Dict = self.prepare_image_inputs() UpperCamelCase : Any = image_processor(__UpperCamelCase , return_tensors="np" ) UpperCamelCase : Dict = processor(images=__UpperCamelCase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.get_image_processor() UpperCamelCase : int = self.get_tokenizer() UpperCamelCase : int = OwlViTProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) 
UpperCamelCase : str = "lower newer" UpperCamelCase : Optional[int] = processor(text=__UpperCamelCase , return_tensors="np" ) UpperCamelCase : List[Any] = tokenizer(__UpperCamelCase , return_tensors="np" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.get_image_processor() UpperCamelCase : str = self.get_tokenizer() UpperCamelCase : int = OwlViTProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) UpperCamelCase : str = "lower newer" UpperCamelCase : Tuple = self.prepare_image_inputs() UpperCamelCase : Dict = processor(text=__UpperCamelCase , images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = "google/owlvit-base-patch32" UpperCamelCase : Tuple = OwlViTProcessor.from_pretrained(__UpperCamelCase ) UpperCamelCase : Any = ["cat", "nasa badge"] UpperCamelCase : Optional[Any] = processor(text=__UpperCamelCase ) UpperCamelCase : List[str] = 16 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = "google/owlvit-base-patch32" UpperCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(__UpperCamelCase ) UpperCamelCase : List[str] = [["cat", "nasa badge"], ["person"]] UpperCamelCase : Tuple = processor(text=__UpperCamelCase ) UpperCamelCase : Dict = 16 UpperCamelCase : List[str] = len(__UpperCamelCase ) UpperCamelCase : List[str] = max([len(__UpperCamelCase ) for texts in input_texts] ) 
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = "google/owlvit-base-patch32" UpperCamelCase : Dict = OwlViTProcessor.from_pretrained(__UpperCamelCase ) UpperCamelCase : int = ["cat", "nasa badge"] UpperCamelCase : Union[str, Any] = processor(text=__UpperCamelCase ) UpperCamelCase : List[Any] = 16 UpperCamelCase : Tuple = inputs["input_ids"] UpperCamelCase : List[Any] = [ [4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : str = self.get_image_processor() UpperCamelCase : Optional[int] = self.get_tokenizer() UpperCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase ) UpperCamelCase : Tuple = self.prepare_image_inputs() UpperCamelCase : Optional[Any] = self.prepare_image_inputs() UpperCamelCase : Any = processor(images=__UpperCamelCase , query_images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dict = self.get_image_processor() UpperCamelCase : Optional[Any] = self.get_tokenizer() UpperCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__UpperCamelCase , 
image_processor=__UpperCamelCase ) UpperCamelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCamelCase : List[Any] = processor.batch_decode(__UpperCamelCase ) UpperCamelCase : str = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
52
"""Gnome sort.

Bug fixed: the original swap line assigned `lst[i], lst[i - 1]` to two
throwaway names instead of swapping the elements, so the list was never
mutated and nothing was sorted. The __main__ driver also called the
undefined name `gnome_sort`; an alias is provided for it.
"""


def lowercase(lst: list) -> list:
    """Sort *lst* in place with gnome sort and return it.

    Gnome sort walks forward while adjacent elements are ordered and steps
    back swapping whenever they are not. O(n^2) worst case, O(n) on
    already-sorted input. Handles empty and single-element lists.
    """
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Out of order: swap the pair and step back to re-check.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


# Backward-compatible alias: the interactive driver below was written
# against the conventional name.
gnome_sort = lowercase


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
260
0
def compute_ap(l):  # noqa: E741  (l = adjacency list, kept for parity with the call site)
    """Print and return the articulation points (cut vertices) of an undirected graph.

    ``l`` maps each vertex (0..n-1) to its list of neighbours.  Uses a DFS
    with low-link values; a root is an articulation point iff it has more
    than one DFS out-edge, any other vertex iff a subtree cannot reach
    above it (bridge or cycle condition below).

    :param l: adjacency list, dict[int, list[int]]
    :return: sorted list of articulation-point vertex indices
    """
    n = len(l)
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        # Count DFS tree edges leaving the root (root AP criterion).
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = dfs(i, i, -1, 0)
            # The root of a DFS tree is an AP only with >1 outgoing tree edge.
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
    # Returning the list (in addition to printing) keeps the original
    # behaviour while making the result usable programmatically.
    return [x for x in range(n) if is_art[x]]


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
43
"""Fine-tune a Transformers model for audio classification (e.g. Wav2Vec2)."""

import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly cut a chunk of at most ``max_length`` seconds from ``wav``.

    Clips shorter than the target length are returned unchanged.
    """
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/feature-extractor we fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    # Deprecated in favour of `freeze_feature_encoder` (see __post_init__).
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def lowercase__(self):
        pass

    def __post_init__(self):
        # Keep the deprecated flag working while steering users to the new one.
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder`"
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`."
                "Only make use of `--freeze_feature_encoder`."
            )


def main():
    """Parse arguments, load data/model, then train and/or evaluate."""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Randomly subsample each training clip, then featurize the batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Featurize a validation batch (full clips, no subsampling)."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="audio-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
260
0
def _SCREAMING_SNAKE_CASE ( lowercase : int ): '''simple docstring''' if a < 0: raise ValueError('Input value must be a positive integer' ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError('Input value must be a \'int\' type' ) return bin(_SCREAMING_SNAKE_CASE ).count('1' ) if __name__ == "__main__": import doctest doctest.testmod()
204
"""Tests for DPMSolverSinglestepScheduler (diffusers).

NOTE(review): this file is obfuscation-mangled — every assignment target is
`_UpperCAmelCase` (so names like `scheduler`, `sample`, `output` read below are
undefined), several functions use undefined names (`config`, `kwargs`,
`time_step`), and one signature declares the same parameter name twice, which
is a SyntaxError.  Code is reproduced unchanged; only comments were added.
"""
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class _a ( lowerCAmelCase):
    """Scheduler test-suite subclass exercising DPMSolverSinglestepScheduler.

    NOTE(review): the base name `lowerCAmelCase` is undefined here — presumably
    SchedulerCommonTest; both class attributes share the name `UpperCamelCase__`
    (second assignment shadows the first) and all methods share `lowercase__`.
    """

    # scheduler classes under test / default forward kwargs for the common suite
    UpperCamelCase__ = (DPMSolverSinglestepScheduler,)
    UpperCamelCase__ = (("""num_inference_steps""", 25),)

    def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any:
        # Build the default scheduler config, then overlay caller overrides.
        _UpperCAmelCase = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0_0_0_1,
            '''beta_end''': 0.0_2,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
            '''sample_max_value''': 1.0,
            '''algorithm_type''': '''dpmsolver++''',
            '''solver_type''': '''midpoint''',
            '''lambda_min_clipped''': -float('''inf''' ),
            '''variance_type''': None,
        }
        # NOTE(review): `config` is undefined — the dict above was assigned to
        # a throwaway name by the obfuscation.
        config.update(**__UpperCamelCase )
        return config

    # NOTE(review): duplicate parameter name `__UpperCamelCase` below is a SyntaxError.
    def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple:
        # Round-trips a configured scheduler through save_config/from_pretrained
        # and checks both produce identical step outputs.
        _UpperCAmelCase = dict(self.forward_default_kwargs )
        _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase )
        _UpperCAmelCase = self.dummy_sample
        _UpperCAmelCase = 0.1 * sample
        _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]

        for scheduler_class in self.scheduler_classes:
            _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
            _UpperCAmelCase = scheduler_class(**__UpperCamelCase )
            scheduler.set_timesteps(__UpperCamelCase )
            # copy over dummy past residuals
            _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__UpperCamelCase )
                _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
                new_scheduler.set_timesteps(__UpperCamelCase )
                # copy over dummy past residuals
                _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]

            _UpperCAmelCase , _UpperCAmelCase = sample, sample
            for t in range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ):
                _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
                _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample

                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def lowercase__ ( self : Any )->Union[str, Any]:
        # Intentionally disabled in the common suite for this scheduler.
        pass

    # NOTE(review): duplicate parameter name `__UpperCamelCase` below is a SyntaxError.
    def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict:
        # Same save/load round-trip, but with residuals copied after set_timesteps.
        _UpperCAmelCase = dict(self.forward_default_kwargs )
        _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase )
        _UpperCAmelCase = self.dummy_sample
        _UpperCAmelCase = 0.1 * sample
        _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]

        for scheduler_class in self.scheduler_classes:
            _UpperCAmelCase = self.get_scheduler_config()
            _UpperCAmelCase = scheduler_class(**__UpperCamelCase )
            scheduler.set_timesteps(__UpperCamelCase )

            # copy over dummy past residuals (must be after setting timesteps)
            _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__UpperCamelCase )
                _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__UpperCamelCase )

                # copy over dummy past residual (must be after setting timesteps)
                _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]

            _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
            _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : Optional[int] )->List[Any]:
        # Full denoising loop over 10 steps; returns the final sample.
        # NOTE(review): `scheduler` below is undefined at the first check — the
        # obfuscation discarded the parameter name.
        if scheduler is None:
            _UpperCAmelCase = self.scheduler_classes[0]
            _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
            _UpperCAmelCase = scheduler_class(**__UpperCamelCase )

        _UpperCAmelCase = self.scheduler_classes[0]
        _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase )
        _UpperCAmelCase = scheduler_class(**__UpperCamelCase )

        _UpperCAmelCase = 1_0
        _UpperCAmelCase = self.dummy_model()
        _UpperCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(__UpperCamelCase )

        for i, t in enumerate(scheduler.timesteps ):
            _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
            _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample

        return sample

    def lowercase__ ( self : List[Any] )->Dict:
        # 50-step loop skipping the first timesteps; checks a regression mean.
        _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        _UpperCAmelCase = 5_0
        _UpperCAmelCase = self.dummy_model()
        _UpperCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(__UpperCamelCase )

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
            _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample

        _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )

        assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3

    def lowercase__ ( self : Dict )->Dict:
        # Config sweep over the training-timestep count.
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=__UpperCamelCase )

    def lowercase__ ( self : str )->Optional[Any]:
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
        _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )

        assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3

        _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
        _UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
        _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
        _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )

        _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase )
        _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )

        assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3

    def lowercase__ ( self : Union[str, Any] )->int:
        # Sweep thresholding settings across orders/solver types/prediction types.
        self.check_over_configs(thresholding=__UpperCamelCase )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__UpperCamelCase ,
                            prediction_type=__UpperCamelCase ,
                            sample_max_value=__UpperCamelCase ,
                            algorithm_type='''dpmsolver++''' ,
                            solver_order=__UpperCamelCase ,
                            solver_type=__UpperCamelCase ,
                        )

    def lowercase__ ( self : str )->str:
        # Sweep supported prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__UpperCamelCase )

    def lowercase__ ( self : List[Any] )->Tuple:
        # Sweep algorithm/solver/order/prediction combinations; ensure no NaNs.
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=__UpperCamelCase ,
                            solver_type=__UpperCamelCase ,
                            prediction_type=__UpperCamelCase ,
                            algorithm_type=__UpperCamelCase ,
                        )
                        _UpperCAmelCase = self.full_loop(
                            solver_order=__UpperCamelCase ,
                            solver_type=__UpperCamelCase ,
                            prediction_type=__UpperCamelCase ,
                            algorithm_type=__UpperCamelCase ,
                        )
                        assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers"

    def lowercase__ ( self : Dict )->List[str]:
        # lower_order_final on/off (both calls obfuscated to the same argument).
        self.check_over_configs(lower_order_final=__UpperCamelCase )
        self.check_over_configs(lower_order_final=__UpperCamelCase )

    def lowercase__ ( self : Dict )->str:
        # lambda_min_clipped: unbounded and a finite clip value.
        self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
        self.check_over_configs(lambda_min_clipped=-5.1 )

    def lowercase__ ( self : List[str] )->int:
        # variance_type: default and learned_range.
        self.check_over_configs(variance_type=__UpperCamelCase )
        self.check_over_configs(variance_type='''learned_range''' )

    def lowercase__ ( self : List[str] )->Union[str, Any]:
        # Forward sweep over inference-step counts.
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 )

    def lowercase__ ( self : List[Any] )->int:
        # Default full loop regression value.
        _UpperCAmelCase = self.full_loop()
        _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )

        assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3

    def lowercase__ ( self : List[str] )->List[str]:
        # Karras-sigma schedule regression value.
        _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase )
        _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )

        assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3

    def lowercase__ ( self : int )->List[Any]:
        # v_prediction regression value.
        _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' )
        _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )

        assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3

    def lowercase__ ( self : Optional[Any] )->Dict:
        # v_prediction + Karras sigmas regression value.
        _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase )
        _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )

        assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3

    def lowercase__ ( self : Union[str, Any] )->List[str]:
        # fp16 path: the loop must keep the sample in half precision.
        _UpperCAmelCase = self.scheduler_classes[0]
        _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 )
        _UpperCAmelCase = scheduler_class(**__UpperCamelCase )

        _UpperCAmelCase = 1_0
        _UpperCAmelCase = self.dummy_model()
        _UpperCAmelCase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__UpperCamelCase )

        for i, t in enumerate(scheduler.timesteps ):
            _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
            _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample

        # NOTE(review): `torch.floataa` is presumably torch.float16 post-obfuscation.
        assert sample.dtype == torch.floataa
0
"""Lazy import structure for the ViT-MAE model (transformers-style __init__).

NOTE(review): obfuscation damage — every assignment target was renamed to
`A_`, so the import-structure dict and the optional model lists no longer
feed `_import_structure` (referenced undefined at the bottom), and the
`sys.modules[__name__] = ...` replacement was lost.  Code is reproduced
unchanged; only comments were added.
"""
from typing import TYPE_CHECKING

# is_flax_available is imported but unused in this file.
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Configuration objects are always importable.
A_ : Tuple = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

# PyTorch model symbols, exposed only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Any = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

# TensorFlow model symbols, exposed only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    A_ : Dict = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime stays lazy.
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    # NOTE(review): `_import_structure` is undefined here (see module note);
    # the original presumably assigned the lazy module into sys.modules.
    A_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
165
"""Plot magnitude and phase responses of single-sample audio filters."""

from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    """Structural type: any filter exposing a per-sample ``process`` method."""

    def process(self, sample: float) -> float:
        """Calculate y[n] from x[n]; the protocol default returns silence."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int):
    """Return (lowest, highest) display bounds for a dB magnitude spectrum.

    Considers only bins 1 .. samplerate//2 - 2 (skips DC and the mirrored
    half), clamped to at most -20 below and at least +20 above.
    """
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the gain (dB) frequency response of ``filter_type``.

    Feeds a unit impulse through the filter, zero-pads to ``samplerate``
    samples, and displays |FFT| in dB on a log-frequency axis.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_db = 20 * np.log10(np.abs(np.fft.fft(outputs)))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response (radians) of ``filter_type``.

    Same impulse/zero-pad procedure as :func:`show_frequency_response`, but
    displays the unwrapped FFT phase on a log-frequency axis.
    """
    size = 512
    inputs = [1] + [0] * (size - 1)  # unit impulse
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
260
0
import qiskit def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ ) -> Optional[int]: lowerCAmelCase = qiskit.Aer.get_backend('''aer_simulator''' ) # Create a Quantum Circuit acting on the q register lowerCAmelCase = qiskit.QuantumCircuit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Map the quantum measurement to the classical bits circuit.measure([0] , [0] ) # Execute the circuit on the simulator lowerCAmelCase = qiskit.execute(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , shots=1_0_0_0 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(f'Total count for various states are: {single_qubit_measure(1, 1)}')
338
"""CamemBERT model configuration."""

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    """Configuration class for a CamemBERT model.

    Stores the hyper-parameters needed to instantiate the model; defaults
    mirror the ``camembert-base`` architecture.
    """

    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special token ids are forwarded to the base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between the
        # batch and sequence dimensions.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
260
0
"""Tests for accelerate's KwargsHandler dataclasses (to_kwargs, GradScaler and DDP plumbing)."""

import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    # Field defaults let to_kwargs() report only the values a caller changed.
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # to_kwargs() must return only the non-default fields.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied.
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default.
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        # Re-launch this file under torchrun so the __main__ block below runs
        # with one process per GPU.
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs.
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults.
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
34
"""PoolFormer model configuration."""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    """Configuration class for a PoolFormer model.

    Defaults mirror the ``sail/poolformer_s12`` architecture.
    """

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    """ONNX export configuration for PoolFormer."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Pooling models need a slightly looser tolerance than the 1e-5 default.
        return 2e-3
260
0
"""Testing suite for the PyTorch ViTMSN model."""

import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    """Builds small ViTMSN configs/inputs and runs shape checks for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # In ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token).
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test greyscale images.
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic.
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Return the fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        # Fix the seed: the classification head is randomly initialized on load.
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # Forward pass.
        with torch.no_grad():
            outputs = model(**inputs)

        # Verify the logits.
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
172
"""Accelerate example: GLUE MRPC fine-tuning with experiment tracking.

Builds off the basic ``nlp_example.py`` script, showcasing the experiment
tracking capability (``log_with="all"``, ``init_trackers``, ``Accelerator.log``).
Runs unchanged on CPU, single/multi GPU, TPU, and with mixed precision.
New additions from the base script are marked with "# New Code #".
"""

import argparse
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Build the MRPC train/validation dataloaders, tokenized and padded per device type."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default).
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first so the cache is only written once.
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # Rename the 'label' column to 'labels', the name the models expect.
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16.
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATch_SIZE
        if False
        else EVAL_BATCH_SIZE,
    )
    return train_dataloader, eval_dataloader


# For testing only.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    """Train BERT on MRPC, logging metrics to every available tracker when requested."""
    # For testing only.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2

    # Initialize Accelerator.
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment.
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs.
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation.
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (built here so the seed also controls new weight initialization).
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # Placing the model on the device must happen before optimizer creation or
    # training will not work on TPU.
    model = model.to(accelerator.device)

    # Instantiate optimizer and scheduler.
    optimizer = AdamW(params=model.parameters(), lr=lr)
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything; unpack in the same order the objects were passed in.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored.
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model.
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch.
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`; values can be `str`, `int`,
        # `float` or `dict` of `str` to `float`/`int`.
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, call `accelerator.end_training()` to close all open trackers.
    if args.with_tracking:
        accelerator.end_training()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
260
0
'''simple docstring''' def __a ( UpperCAmelCase , UpperCAmelCase ) ->int: """simple docstring""" A = int(_SCREAMING_SNAKE_CASE ) # Initialize Result A = [] # Traverse through all denomination for denomination in reversed(_SCREAMING_SNAKE_CASE ): # Find denominations while int(_SCREAMING_SNAKE_CASE ) >= int(_SCREAMING_SNAKE_CASE ): total_value -= int(_SCREAMING_SNAKE_CASE ) answer.append(_SCREAMING_SNAKE_CASE ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : str = "0" if ( input('Do you want to enter your denominations ? (yY/n): ').strip().lower() == "y" ): _lowerCamelCase : Optional[int] = int(input('Enter the number of denominations you want to add: ').strip()) for i in range(0, n): denominations.append(int(input(f"Denomination {i}: ").strip())) _lowerCamelCase : List[Any] = input('Enter the change you want to make in Indian Currency: ').strip() else: # All denominations of Indian Currency if user does not enter _lowerCamelCase : int = [1, 2, 5, 10, 20, 50, 100, 500, 2000] _lowerCamelCase : str = input('Enter the change you want to make: ').strip() if int(value) == 0 or int(value) < 0: print('The total value cannot be zero or negative.') else: print(f"Following is minimal change for {value}: ") _lowerCamelCase : int = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=' ')
258
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
260
0
# Lazy-import module definition for the PLBart model family: the import
# structure is declared up front and only materialized on first attribute
# access via _LazyModule.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> public names it exports; extended below only for
# the optional dependencies that are actually installed.
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports ...
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )
else:
    # ... while at runtime the module is replaced by a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
82
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , 
'''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, 
args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
"""simple docstring""" import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( _A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : List[str] = CodeGenTokenizer SCREAMING_SNAKE_CASE_ : Any = CodeGenTokenizerFast SCREAMING_SNAKE_CASE_ : Any = True SCREAMING_SNAKE_CASE_ : Dict = {"add_prefix_space": True} SCREAMING_SNAKE_CASE_ : Dict = False def A ( self : Dict ) -> Optional[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ : List[str] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] lowercase_ : Dict = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) ) lowercase_ : str = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] lowercase_ : Union[str, Any] = {'''unk_token''': '''<unk>'''} lowercase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__UpperCamelCase ) ) def A ( self : Dict , **A : Tuple ) -> List[str]: kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def A ( self : str , **A : List[str] ) -> Dict: 
kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase ) def A ( self : Union[str, Any] , A : Union[str, Any] ) -> Union[str, Any]: lowercase_ : Dict = '''lower newer''' lowercase_ : int = '''lower newer''' return input_text, output_text def A ( self : Dict ) -> str: lowercase_ : Optional[int] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) lowercase_ : List[str] = '''lower newer''' lowercase_ : List[str] = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] lowercase_ : Tuple = tokenizer.tokenize(__UpperCamelCase , add_prefix_space=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) lowercase_ : str = tokens + [tokenizer.unk_token] lowercase_ : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def A ( self : str ) -> Optional[Any]: if not self.test_rust_tokenizer: return lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer(add_prefix_space=__UpperCamelCase ) lowercase_ : List[str] = '''lower newer''' # Testing tokenization lowercase_ : List[str] = tokenizer.tokenize(__UpperCamelCase , add_prefix_space=__UpperCamelCase ) lowercase_ : List[Any] = rust_tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) # Testing conversion to ids without special tokens lowercase_ : str = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase , add_prefix_space=__UpperCamelCase ) lowercase_ : int = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) # Testing conversion to ids with special tokens lowercase_ : int = self.get_rust_tokenizer(add_prefix_space=__UpperCamelCase ) lowercase_ : int = tokenizer.encode(__UpperCamelCase , add_prefix_space=__UpperCamelCase ) 
lowercase_ : Optional[int] = rust_tokenizer.encode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) # Testing the unknown token lowercase_ : List[Any] = tokens + [rust_tokenizer.unk_token] lowercase_ : str = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase ) def A ( self : Optional[Any] , *A : Optional[int] , **A : str ) -> Tuple: # It's very difficult to mix/test pretokenization with byte-level # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def A ( self : List[str] , A : List[str]=15 ) -> Tuple: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : str = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase ) # Simple input lowercase_ : Union[str, Any] = '''This is a simple input''' lowercase_ : Dict = ['''This is a simple input 1''', '''This is a simple input 2'''] lowercase_ : Tuple = ('''This is a simple input''', '''This is a pair''') lowercase_ : Union[str, Any] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(__UpperCamelCase , tokenizer_r.encode , __UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) # Simple input self.assertRaises(__UpperCamelCase , tokenizer_r.encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) # Simple input self.assertRaises( __UpperCamelCase , tokenizer_r.batch_encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' , ) # Pair input self.assertRaises(__UpperCamelCase , tokenizer_r.encode , __UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) # Pair input self.assertRaises(__UpperCamelCase , 
tokenizer_r.encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' ) # Pair input self.assertRaises( __UpperCamelCase , tokenizer_r.batch_encode_plus , __UpperCamelCase , max_length=__UpperCamelCase , padding='''max_length''' , ) def A ( self : str ) -> Union[str, Any]: lowercase_ : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input lowercase_ : Optional[Any] = '''This is a simple input''' lowercase_ : List[Any] = ['''This is a simple input looooooooong''', '''This is a simple input'''] lowercase_ : Optional[int] = ('''This is a simple input''', '''This is a pair''') lowercase_ : List[str] = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] lowercase_ : List[str] = tokenizer.pad_token_id lowercase_ : int = tokenizer(__UpperCamelCase , padding='''max_length''' , max_length=30 , return_tensors='''np''' ) lowercase_ : Any = tokenizer(__UpperCamelCase , padding=__UpperCamelCase , truncate=__UpperCamelCase , return_tensors='''np''' ) lowercase_ : Tuple = tokenizer(*__UpperCamelCase , padding='''max_length''' , max_length=60 , return_tensors='''np''' ) lowercase_ : int = tokenizer(__UpperCamelCase , padding=__UpperCamelCase , truncate=__UpperCamelCase , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in out_sa['''attention_mask'''][1] ) # p # test single pair max_length 
padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def A ( self : List[Any] ) -> int: lowercase_ : Union[str, Any] = '''$$$''' lowercase_ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__UpperCamelCase , add_bos_token=__UpperCamelCase ) lowercase_ : Union[str, Any] = '''This is a simple input''' lowercase_ : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2'''] lowercase_ : Any = tokenizer.bos_token_id lowercase_ : Optional[Any] = tokenizer(__UpperCamelCase ) lowercase_ : Dict = tokenizer(__UpperCamelCase ) self.assertEqual(out_s.input_ids[0] , __UpperCamelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) lowercase_ : Union[str, Any] = tokenizer.decode(out_s.input_ids ) lowercase_ : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , __UpperCamelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def A ( self : Union[str, Any] ) -> Optional[Any]: lowercase_ : Dict = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' ) lowercase_ : List[Any] = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#''' lowercase_ : Optional[Any] = '''\nif len_a > len_b: result = a\nelse: result = b''' lowercase_ : List[Any] = tokenizer.encode(__UpperCamelCase ) lowercase_ : Optional[int] = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n'''] 
lowercase_ : str = tokenizer.decode(__UpperCamelCase , truncate_before_pattern=__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def A ( self : Optional[int] ) -> Tuple: pass
33
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( _SCREAMING_SNAKE_CASE : Features ): '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(_SCREAMING_SNAKE_CASE : FeatureType ) -> None: nonlocal batch_size if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : int , )->Union[str, Any]: super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) 
_UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCAmelCase = Parquet( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , hash=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Union[str, Any] )->Dict: # Build iterable dataset if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Dataset , __UpperCamelCase : Union[PathLike, BinaryIO] , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Tuple , )->Optional[int]: _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: _UpperCAmelCase = self._write(file_obj=__UpperCamelCase , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : int , __UpperCamelCase : BinaryIO , __UpperCamelCase : int , **__UpperCamelCase 
: int )->int: _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCamelCase ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase , **__UpperCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCamelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(__UpperCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCamelCase ) written += batch.nbytes writer.close() return written
260
0
"""simple docstring""" from collections import defaultdict from math import ceil, sqrt def __a ( _SCREAMING_SNAKE_CASE = 1000000 , _SCREAMING_SNAKE_CASE = 10 ) ->List[Any]: a__: str = defaultdict(_SCREAMING_SNAKE_CASE ) for outer_width in range(3 , (t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: a__: List[str] = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 ) else: a__: Union[str, Any] = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(_SCREAMING_SNAKE_CASE , outer_width - 1 , 2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(f"{solution() = }")
290
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = " " ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 0 for index, char in enumerate(_SCREAMING_SNAKE_CASE ): if char == separator: split_words.append(string[last_index:index] ) _UpperCAmelCase = index + 1 elif index + 1 == len(_SCREAMING_SNAKE_CASE ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
260
0
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}


class InformerConfig(PretrainedConfig):
    """Configuration for the Informer time-series transformer.

    Holds time-series feature options (lags, static/dynamic features),
    the encoder-decoder transformer hyperparameters, and the
    Informer-specific ProbSparse attention settings.
    """

    model_type = "informer"
    # Map common transformer attribute names onto this config's fields.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # Time-series-specific configuration.
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        """Total width of the per-timestep feature vector fed to the model."""
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
52
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = args.pruning_method _UpperCAmelCase = args.threshold _UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) _UpperCAmelCase = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) _UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) _UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "bias" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": _UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1 _UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE ) 
_UpperCAmelCase = s * (r - l) + l _UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: _UpperCAmelCase = os.path.join( os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'\nCreated folder {target_model_path}' ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A : Optional[int] = parser.parse_args() main(args)
260
0
"""Convert a Megatron-DeepSpeed BLOOM checkpoint to the Transformers format."""
import argparse
import json
import os
import re

import torch

from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging

logging.set_verbosity_info()

# Weights replicated across tensor-parallel ranks: sum then divide by TP size.
WEIGHTS_TO_AVERAGE_ENDSWITH = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]

# Row-parallel weights are concatenated along dim 1; all others along dim 0.
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]


def layer_name_mapping(key, file):
    """Map a Megatron-DeepSpeed state-dict key to its Transformers name.

    Args:
        key: original key inside one shard's state dict.
        file: shard file name, used to recover the layer index.
    """
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]

    # Transformer blocks: layer files are offset by 3 bookkeeping layers.
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key


def get_dtype_size(dtype):
    """Return the size in bytes of one element of ``dtype``.

    Raises:
        ValueError: if the dtype name carries no bit width.
    """
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    """Merge the TP-sharded Megatron checkpoint and save it in HF format.

    Args:
        bloom_checkpoint_path: folder with the Megatron-DeepSpeed shards.
        bloom_config_file: optional config JSON; empty string uses defaults.
        pytorch_dump_folder_path: output folder.
        shard_model: if True, write one HF shard per Megatron layer file plus
            an index; otherwise load everything into a ``BloomModel`` first.
        pretraining_tp: tensor-parallel degree used during pretraining.
    """
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for tp_rank in range(pretraining_tp):
                # Load the same layer from every TP rank.
                f_name = file.replace("model_00", f"model_0{tp_rank}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys to the Transformers names.
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # Replicated weights: accumulate, divide later.
                            tensors[key] += temp[key]
                        else:
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Average the replicated weights across TP ranks.
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for tp_rank in range(pretraining_tp):
                # Load the same layer from every TP rank.
                f_name = file.replace("model_00", f"model_0{tp_rank}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys to the Transformers names.
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Average the replicated weights across TP ranks.
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                # A key is only truly missing if no shard supplied it.
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))

        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    args = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
43
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
0
"""Flax implementation of the Karras et al. (2022) variance-expanding scheduler."""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class KarrasVeSchedulerState:
    """Immutable scheduler state threaded through the sampling loop."""

    # Number of diffusion steps configured via set_timesteps.
    num_inference_steps: Optional[int] = None
    # Descending integer timesteps.
    timesteps: Optional[jnp.ndarray] = None
    # Noise levels sigma(t_i) matching `timesteps`.
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        """Return an empty state; fields are filled by ``set_timesteps``."""
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    """Output of a scheduler step: previous sample, derivative and state."""

    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Stochastic sampler from Karras et al., 'Elucidating the Design Space
    of Diffusion-Based Generative Models' (Algorithm 2)."""

    @property
    def has_state(self):
        # This scheduler carries explicit functional state.
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # Parameters are stored on self.config by @register_to_config.
        pass

    def create_state(self):
        """Return a fresh, empty ``KarrasVeSchedulerState``."""
        return KarrasVeSchedulerState.create()

    def set_timesteps(
        self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> KarrasVeSchedulerState:
        """Populate the state with timesteps and the geometric sigma schedule."""
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        # Geometric interpolation from sigma_max down to sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ) -> Tuple[jnp.ndarray, float]:
        """Raise the noise level from sigma to sigma_hat by injecting noise."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Euler step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """Second-order (Heun) correction of the Euler step."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        # Not supported by this scheduler.
        raise NotImplementedError()
204
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase = np.zeros_like(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image _UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): _UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() _UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / "image_data" / "lena.jpg" __A : str = np.array(Image.open(lena_path)) # kernel to be applied __A : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Optional[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
260
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ : Optional[int] = { "configuration_instructblip": [ "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "InstructBlipConfig", "InstructBlipQFormerConfig", "InstructBlipVisionConfig", ], "processing_instructblip": ["InstructBlipProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[str] = [ "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "InstructBlipQFormerModel", "InstructBlipPreTrainedModel", "InstructBlipForConditionalGeneration", "InstructBlipVisionModel", ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys A_ : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
165
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """audio-spectrogram-transformer""" def __init__( self : int , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : int=1_2 , __UpperCamelCase : List[Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Union[str, Any]=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Union[str, Any]=1e-12 , __UpperCamelCase : Optional[Any]=1_6 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : int=1_0 , __UpperCamelCase : Optional[int]=1_0 , __UpperCamelCase : str=1_0_2_4 , __UpperCamelCase : Optional[Any]=1_2_8 , **__UpperCamelCase : Any , )->Tuple: super().__init__(**__UpperCamelCase ) _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = patch_size _UpperCAmelCase = qkv_bias _UpperCAmelCase = frequency_stride _UpperCAmelCase = time_stride _UpperCAmelCase = max_length _UpperCAmelCase = num_mel_bins
260
0
"""Check that every op in a TensorFlow SavedModel is supported by ONNX."""
import argparse
import json
import os

from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    """Compare the ops of a SavedModel against the ONNX opset op list.

    Args:
        saved_model_path: path to the ``.pb`` SavedModel file.
        strict: raise instead of warn when incompatible ops are found.
        opset: highest ONNX opset to consider supported.

    Raises:
        Exception: in strict mode, when incompatible ops are present.
    """
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    # Ops are cumulative across opsets 1..opset.
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = [op for op in model_op_names if op not in onnx_ops and op not in INTERNAL_OPS]

    if strict and len(incompatible_ops) > 0:
        # Join the op names; concatenating the list itself would be a TypeError.
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
338
"""simple docstring""" def lowercase ( ): '''simple docstring''' _UpperCAmelCase = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] _UpperCAmelCase = 6 _UpperCAmelCase = 1 _UpperCAmelCase = 1901 _UpperCAmelCase = 0 while year < 2001: day += 7 if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0): if day > days_per_month[month - 1] and month != 2: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] elif day > 29 and month == 2: month += 1 _UpperCAmelCase = day - 29 else: if day > days_per_month[month - 1]: month += 1 _UpperCAmelCase = day - days_per_month[month - 2] if month > 12: year += 1 _UpperCAmelCase = 1 if year < 2001 and day == 1: sundays += 1 return sundays if __name__ == "__main__": print(solution())
260
0
"""Configuration for the GPT-NeoX model."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    """Stores the configuration of a GPT-NeoX model.

    Args:
        vocab_size: size of the token vocabulary.
        hidden_size: dimensionality of the decoder layers.
        num_hidden_layers: number of decoder layers.
        num_attention_heads: attention heads per layer.
        intermediate_size: feed-forward hidden dimensionality.
        hidden_act: activation function name.
        rotary_pct: fraction of head dims receiving rotary embeddings.
        rotary_emb_base: base for the rotary embedding frequencies.
        attention_dropout / hidden_dropout / classifier_dropout: dropouts.
        max_position_embeddings: maximum sequence length.
        initializer_range: stddev for weight initialization.
        layer_norm_eps: epsilon used by layer-norm layers.
        use_cache: whether to return key/value caches.
        use_parallel_residual: use the parallel attention+MLP residual form.
        rope_scaling: optional dict {"type": "linear"|"dynamic", "factor": >1}.
    """

    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50_432,
        hidden_size=6_144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24_576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10_000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2_048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the ``rope_scaling`` configuration dictionary.

        Raises:
            ValueError: if the dict shape, type name or factor is invalid.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
34
"""simple docstring""" from __future__ import annotations import math def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(_SCREAMING_SNAKE_CASE ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [n] for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' if len(str(_SCREAMING_SNAKE_CASE ) ) > 3: if not is_prime(int(str(_SCREAMING_SNAKE_CASE )[-3:] ) ) or not is_prime(int(str(_SCREAMING_SNAKE_CASE )[:3] ) ): return False return True def lowercase ( _SCREAMING_SNAKE_CASE : int = 11 ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 13 while len(_SCREAMING_SNAKE_CASE ) != count: if validate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = list_truncated_nums(_SCREAMING_SNAKE_CASE ) if all(is_prime(_SCREAMING_SNAKE_CASE ) for i in list_nums ): list_truncated_primes.append(_SCREAMING_SNAKE_CASE ) num += 2 return list_truncated_primes def lowercase ( ): '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(f'''{sum(compute_truncated_primes(11)) = }''')
260
0
"""simple docstring""" import argparse from ...utils.dataclasses import ( ComputeEnvironment, DistributedType, DynamoBackend, PrecisionType, SageMakerDistributedType, ) from ..menu import BulletMenu _a : Tuple= [ "EAGER", "AOT_EAGER", "INDUCTOR", "NVFUSER", "AOT_NVFUSER", "AOT_CUDAGRAPHS", "OFI", "FX2TRT", "ONNXRT", "IPEX", ] def __UpperCAmelCase ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Union[str, Any]=None ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[int] = True while ask_again: __snake_case : Dict = input(_SCREAMING_SNAKE_CASE ) try: if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0: return default return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result except Exception: if error_message is not None: print(_SCREAMING_SNAKE_CASE ) def __UpperCAmelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=[] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : str=0 ) -> Optional[int]: '''simple docstring''' __snake_case : Optional[Any] = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __snake_case : int = menu.run(default_choice=_SCREAMING_SNAKE_CASE ) return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result def __UpperCAmelCase ( UpperCAmelCase_ : Dict ) -> Union[str, Any]: '''simple docstring''' __snake_case : str = int(_SCREAMING_SNAKE_CASE ) return ComputeEnvironment(['LOCAL_MACHINE', 'AMAZON_SAGEMAKER'][value] ) def __UpperCAmelCase ( UpperCAmelCase_ : List[str] ) -> Tuple: '''simple docstring''' __snake_case : Tuple = int(_SCREAMING_SNAKE_CASE ) return DistributedType(['NO', 'MULTI_CPU', 'MULTI_XPU', 'MULTI_GPU', 'MULTI_NPU', 'TPU'][value] ) def __UpperCAmelCase ( UpperCAmelCase_ : List[str] ) -> Dict: '''simple docstring''' __snake_case : Any = int(_SCREAMING_SNAKE_CASE ) return DynamoBackend(DYNAMO_BACKENDS[value] ).value def __UpperCAmelCase ( UpperCAmelCase_ : 
Optional[int] ) -> int: '''simple docstring''' __snake_case : Optional[int] = int(_SCREAMING_SNAKE_CASE ) return PrecisionType(['no', 'fp16', 'bf16', 'fp8'][value] ) def __UpperCAmelCase ( UpperCAmelCase_ : List[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[int] = int(_SCREAMING_SNAKE_CASE ) return SageMakerDistributedType(['NO', 'DATA_PARALLEL', 'MODEL_PARALLEL'][value] ) def __UpperCAmelCase ( UpperCAmelCase_ : Optional[int] ) -> List[Any]: '''simple docstring''' return {"yes": True, "no": False}[value.lower()] class UpperCamelCase ( argparse.RawDescriptionHelpFormatter ): def _lowercase (self : Any , _A : Any , _A : List[Any] , _A : Any , _A : List[str]) -> int: __snake_case : int = super()._format_usage(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) __snake_case : Optional[Any] = usage.replace('<command> [<args>] ' , '') return usage
172
"""simple docstring""" import argparse import json import os import sys import tempfile import unittest from argparse import Namespace from dataclasses import dataclass, field from enum import Enum from pathlib import Path from typing import List, Literal, Optional import yaml from transformers import HfArgumentParser, TrainingArguments from transformers.hf_argparser import make_choice_type_function, string_to_bool # Since Python 3.10, we can use the builtin `|` operator for Union types # See PEP 604: https://peps.python.org/pep-0604 __A : str = sys.version_info >= (3, 10) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Tuple=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=_SCREAMING_SNAKE_CASE ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """titi""" UpperCamelCase__ = """toto""" UpperCamelCase__ = 42 @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : Tuple )->Optional[int]: _UpperCAmelCase = BasicEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" def lowercase__ ( self : List[str] )->List[Any]: _UpperCAmelCase = MixedTypeEnum(self.foo ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = 
list_field(default=[]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[1, 2, 3]) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) UpperCamelCase__ = list_field(default=[0.1, 0.2, 0.3]) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field() UpperCamelCase__ = field() UpperCamelCase__ = field() def lowercase__ ( self : int )->str: _UpperCAmelCase = BasicEnum(self.required_enum ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = 42 UpperCamelCase__ = field() UpperCamelCase__ = None UpperCamelCase__ = field(default="""toto""" , metadata={"""help""": """help message"""}) UpperCamelCase__ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""]) if is_python_no_less_than_3_10: @dataclass class _a : """simple docstring""" UpperCamelCase__ = False UpperCamelCase__ = True UpperCamelCase__ = None @dataclass class _a : """simple docstring""" UpperCamelCase__ = None UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """help message"""}) UpperCamelCase__ = None UpperCamelCase__ = list_field(default=[]) UpperCamelCase__ = list_field(default=[]) class _a ( unittest.TestCase): """simple docstring""" def lowercase__ ( self : int , __UpperCamelCase : argparse.ArgumentParser , __UpperCamelCase : argparse.ArgumentParser )->Dict: self.assertEqual(len(a._actions ) , len(b._actions ) ) for x, y in zip(a._actions , b._actions ): _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} _UpperCAmelCase = {k: v for k, v in vars(__UpperCamelCase ).items() if k != '''container'''} # Choices with mixed type have custom function as "type" # So we need to compare results directly for equality if xx.get('''choices''' , __UpperCamelCase ) and yy.get('''choices''' , __UpperCamelCase ): for expected_choice in yy["choices"] + xx["choices"]: self.assertEqual(xx['''type'''](__UpperCamelCase ) , 
yy['''type'''](__UpperCamelCase ) ) del xx["type"], yy["type"] self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--bar''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--baz''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--flag''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5'''] ((_UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(__UpperCamelCase , look_for_args_file=__UpperCamelCase ) self.assertFalse(example.flag ) def lowercase__ ( self : Dict )->List[Any]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=4_2 , type=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Tuple )->List[str]: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) expected.add_argument('''--baz''' , type=__UpperCamelCase , default=__UpperCamelCase , const=__UpperCamelCase , nargs='''?''' ) # A boolean no_* argument always has to come after its "default: True" regular counter-part # and its default must be set to False expected.add_argument('''--no_baz''' , action='''store_false''' , default=__UpperCamelCase , dest='''baz''' ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) 
_UpperCAmelCase = [WithDefaultBoolExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , baz=__UpperCamelCase , opt=__UpperCamelCase ) ) def lowercase__ ( self : Optional[Any] )->str: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 4_2] , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.toto ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', 
'''titi'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.titi ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) _UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0] self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo ) def lowercase__ ( self : List[str] )->List[str]: @dataclass class _a : """simple docstring""" UpperCamelCase__ = "toto" _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument( '''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 4_2) , type=make_choice_type_function(['''titi''', '''toto''', 4_2] ) , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(args.foo , '''toto''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] ) self.assertEqual(args.foo , '''titi''' ) _UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] ) self.assertEqual(args.foo , 4_2 ) def lowercase__ ( self : int )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=__UpperCamelCase ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual( __UpperCamelCase , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , ) _UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() ) 
self.assertEqual(__UpperCamelCase , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) ) def lowercase__ ( self : Union[str, Any] )->Tuple: _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--bar''' , default=__UpperCamelCase , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--baz''' , default=__UpperCamelCase , type=__UpperCamelCase ) expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=__UpperCamelCase ) _UpperCAmelCase = [OptionalExample] if is_python_no_less_than_3_10: dataclass_types.append(__UpperCamelCase ) for dataclass_type in dataclass_types: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_args([] ) self.assertEqual(__UpperCamelCase , Namespace(foo=__UpperCamelCase , bar=__UpperCamelCase , baz=__UpperCamelCase , ces=[] , des=[] ) ) _UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() ) self.assertEqual(__UpperCamelCase , Namespace(foo=1_2 , bar=3.1_4 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) ) def lowercase__ ( self : Any )->int: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--required_list''' , nargs='''+''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument('''--required_str''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : str )->List[Any]: 
_UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = argparse.ArgumentParser() expected.add_argument('''--foo''' , type=__UpperCamelCase , required=__UpperCamelCase ) expected.add_argument( '''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=__UpperCamelCase , ) expected.add_argument('''--opt''' , type=__UpperCamelCase , default=__UpperCamelCase ) expected.add_argument('''--baz''' , default='''toto''' , type=__UpperCamelCase , help='''help message''' ) expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=__UpperCamelCase ) self.argparsersEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } _UpperCAmelCase = parser.parse_dict(__UpperCamelCase )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, '''extra''': 4_2, } self.assertRaises(__UpperCamelCase , parser.parse_dict , __UpperCamelCase , allow_extra_keys=__UpperCamelCase ) def lowercase__ ( self : Optional[Any] )->Optional[int]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_json''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.json''' , '''w+''' ) as f: json.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0] 
_UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : Union[str, Any] )->Any: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) _UpperCAmelCase = { '''foo''': 1_2, '''bar''': 3.1_4, '''baz''': '''42''', '''flag''': True, } with tempfile.TemporaryDirectory() as tmp_dir: _UpperCAmelCase = os.path.join(__UpperCamelCase , '''temp_yaml''' ) os.mkdir(__UpperCamelCase ) with open(temp_local_path + '''.yaml''' , '''w+''' ) as f: yaml.dump(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0] _UpperCAmelCase = BasicExample(**__UpperCamelCase ) self.assertEqual(__UpperCamelCase , __UpperCamelCase ) def lowercase__ ( self : int )->List[str]: _UpperCAmelCase = HfArgumentParser(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase )
260
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Module logger (name mangled by obfuscation; originally `logger` -- TODO confirm).
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)

# Pretrained checkpoint name -> config URL map.
_lowerCamelCase : Optional[int] = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class __UpperCAmelCase ( A__ ):
    '''simple docstring'''

    # Model-type identifier consumed by the transformers auto classes.
    __lowerCAmelCase = '''open-llama'''

    # NOTE(review): every `A = <name>` line below reads a name (`vocab_size`,
    # `hidden_size`, ...) that does not exist under the mangled parameter
    # names (`_lowerCAmelCase` repeated); these were presumably
    # `self.<name> = <name>` attribute assignments before obfuscation and are
    # unbound as written.
    def __init__(self : Any , _lowerCAmelCase : int=10_0000 , _lowerCAmelCase : str=4096 , _lowerCAmelCase : Optional[int]=1_1008 , _lowerCAmelCase : List[str]=32 , _lowerCAmelCase : str=32 , _lowerCAmelCase : str="silu" , _lowerCAmelCase : Optional[Any]=2048 , _lowerCAmelCase : Optional[int]=0.02 , _lowerCAmelCase : List[str]=1e-6 , _lowerCAmelCase : Any=True , _lowerCAmelCase : Optional[int]=0 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : str=2 , _lowerCAmelCase : int=False , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Any=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Tuple=None , **_lowerCAmelCase : Tuple , ):
        A = vocab_size
        A = max_position_embeddings
        A = hidden_size
        A = intermediate_size
        A = num_hidden_layers
        A = num_attention_heads
        A = hidden_act
        A = initializer_range
        A = rms_norm_eps
        A = use_cache
        # NOTE: the misspelled key "use_memorry_efficient_attention" exists in
        # the upstream config too (kept for backward compatibility of saved
        # configs) -- do not "fix" the spelling here.
        A = kwargs.pop(
            """use_memorry_efficient_attention""" , __UpperCamelCase )
        A = hidden_dropout_prob
        A = attention_dropout_prob
        A = use_stable_embedding
        A = shared_input_output_embedding
        A = rope_scaling
        # Validate the rope_scaling dict before handing off to the base class.
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , tie_word_embeddings=__UpperCamelCase , **__UpperCamelCase , )

    def A (self : Optional[int] ):
        # Validates `self.rope_scaling`: must be None or a two-field dict with
        # a "type" in {"linear", "dynamic"} and a float "factor" > 1.
        # (The first error message says `name`/`factor` while the lookup below
        # uses the key "type" -- same inconsistency exists upstream.)
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , __UpperCamelCase ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                """`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
                F"""got {self.rope_scaling}""" )
        A = self.rope_scaling.get("""type""" , __UpperCamelCase )
        A = self.rope_scaling.get("""factor""" , __UpperCamelCase )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"""`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}""" )
        if rope_scaling_factor is None or not isinstance(__UpperCamelCase , __UpperCamelCase ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"""`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}""" )
258
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] _UpperCAmelCase = True for i in range(_SCREAMING_SNAKE_CASE ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: _UpperCAmelCase = True if a[i].islower(): _UpperCAmelCase = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
260
0
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class __lowerCAmelCase:
    """Mixin providing serialization round-trip tests for feature extractors.

    Concrete test classes must provide:
      * ``feature_extraction_class`` -- the feature-extractor class under test;
      * ``feat_extract_dict``        -- constructor kwargs for a test instance
        (supplied by the subclass; intentionally not defined here).

    The obfuscated original defined all four methods under the same name
    (``snake_case``, so only the last survived) and referenced locals that
    were never bound (``feat_extract``, ``feat_extract_first``, ...); both
    defects are repaired with conventional pytest-discoverable names.
    """

    # Set by subclasses to the class under test.
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        """to_json_string() must round-trip every constructor kwarg."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """to_json_file() / from_json_file() must preserve the full config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """save_pretrained() / from_pretrained() must round-trip the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            # The file written to disk must be well-formatted JSON.
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """The feature extractor must be constructible with no arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
82
"""simple docstring""" import random def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase = a[left_index] _UpperCAmelCase = left_index + 1 for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ): if a[j] < pivot: _UpperCAmelCase , _UpperCAmelCase = a[i], a[j] i += 1 _UpperCAmelCase , _UpperCAmelCase = a[i - 1], a[left_index] return i - 1 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] ): '''simple docstring''' if left < right: _UpperCAmelCase = random.randint(_SCREAMING_SNAKE_CASE , right - 1 ) _UpperCAmelCase , _UpperCAmelCase = ( a[left], a[pivot], ) # switches the pivot with the left most bound _UpperCAmelCase = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) quick_sort_random( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point quick_sort_random( _SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point def lowercase ( ): '''simple docstring''' _UpperCAmelCase = input('''Enter numbers separated by a comma:\n''' ).strip() _UpperCAmelCase = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(''',''' )] quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) ) print(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor __A : Optional[Any] = logging.get_logger(__name__) class _UpperCAmelCase ( _A ): def __init__( self : Union[str, Any] , *A : int , **A : Dict ) -> None: warnings.warn( '''The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use ChineseCLIPImageProcessor instead.''' , __UpperCamelCase , ) super().__init__(*__UpperCamelCase , **__UpperCamelCase )
33
"""simple docstring""" import numpy as np import torch from torch.nn import CrossEntropyLoss from transformers import AutoModelForCausalLM, AutoTokenizer import datasets from datasets import logging __A : Union[str, Any] = "\\n\n" __A : Any = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n" __A : List[str] = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class _a ( datasets.Metric): """simple docstring""" def lowercase__ ( self : List[Any] )->Union[str, Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''input_texts''': datasets.Value('''string''' ), } ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , ) def lowercase__ ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : int = 1_6 , __UpperCamelCase : bool = True , __UpperCamelCase : List[Any]=None )->Any: if device is not None: assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu." 
if device == "gpu": _UpperCAmelCase = '''cuda''' else: _UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' _UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase ) _UpperCAmelCase = model.to(__UpperCamelCase ) _UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase ) # if batch_size > 1 (which generally leads to padding being required), and # if there is not an already assigned pad_token, assign an existing # special token to also be the padding token if tokenizer.pad_token is None and batch_size > 1: _UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() ) # check that the model already has at least one special token defined assert ( len(__UpperCamelCase ) > 0 ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1." # assign one of the special tokens to also be the pad token tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} ) if add_start_token: # leave room for <BOS> token to be added: assert ( tokenizer.bos_token is not None ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False" _UpperCAmelCase = model.config.max_length - 1 else: _UpperCAmelCase = model.config.max_length _UpperCAmelCase = tokenizer( __UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors='''pt''' , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase ) _UpperCAmelCase = encodings['''input_ids'''] _UpperCAmelCase = encodings['''attention_mask'''] # check that each input is long enough: if add_start_token: assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long." 
else: assert torch.all( torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings." _UpperCAmelCase = [] _UpperCAmelCase = CrossEntropyLoss(reduction='''none''' ) for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ): _UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) ) _UpperCAmelCase = encoded_texts[start_index:end_index] _UpperCAmelCase = attn_masks[start_index:end_index] if add_start_token: _UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase ) _UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 ) _UpperCAmelCase = torch.cat( [torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCamelCase ), attn_mask] , dim=1 ) _UpperCAmelCase = encoded_batch with torch.no_grad(): _UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits _UpperCAmelCase = out_logits[..., :-1, :].contiguous() _UpperCAmelCase = labels[..., 1:].contiguous() _UpperCAmelCase = attn_mask[..., 1:].contiguous() _UpperCAmelCase = torch.expa( (loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 ) / shift_attention_mask_batch.sum(1 ) ) ppls += perplexity_batch.tolist() return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )}
260
0
"""simple docstring""" from __future__ import annotations import time lowercase__ = list[tuple[int, int]] lowercase__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowercase__ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class __snake_case : def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase) -> Any: '''simple docstring''' a__: Any = pos_x a__: Dict = pos_y a__: Union[str, Any] = (pos_y, pos_x) a__: List[str] = goal_x a__: Optional[Any] = goal_y a__: Optional[int] = parent class __snake_case : def __init__( self , lowercase , lowercase) -> str: '''simple docstring''' a__: Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , __UpperCamelCase) a__: Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __UpperCamelCase) a__: List[str] = [self.start] a__: Dict = False def lowerCamelCase_ ( self) -> Path | None: '''simple docstring''' while self.node_queue: a__: List[Any] = self.node_queue.pop(0) if current_node.pos == self.target.pos: a__: List[Any] = True return self.retrace_path(__UpperCamelCase) a__: Optional[int] = self.get_successors(__UpperCamelCase) for node in successors: self.node_queue.append(__UpperCamelCase) if not self.reached: return [self.start.pos] return None def lowerCamelCase_ ( self , lowercase) -> list[Node]: '''simple docstring''' a__: Optional[Any] = [] for action in delta: a__: str = parent.pos_x + action[1] a__: Optional[int] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(__UpperCamelCase) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(__UpperCamelCase , __UpperCamelCase , self.target.pos_y , self.target.pos_x , __UpperCamelCase)) return successors def lowerCamelCase_ ( self , lowercase) -> Path: '''simple docstring''' a__: Optional[Any] = node a__: Tuple = [] 
while current_node is not None: path.append((current_node.pos_y, current_node.pos_x)) a__: List[str] = current_node.parent path.reverse() return path class __snake_case : def __init__( self , lowercase , lowercase) -> Dict: '''simple docstring''' a__: Tuple = BreadthFirstSearch(__UpperCamelCase , __UpperCamelCase) a__: List[str] = BreadthFirstSearch(__UpperCamelCase , __UpperCamelCase) a__: int = False def lowerCamelCase_ ( self) -> Path | None: '''simple docstring''' while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: a__: List[str] = self.fwd_bfs.node_queue.pop(0) a__: Tuple = self.bwd_bfs.node_queue.pop(0) if current_bwd_node.pos == current_fwd_node.pos: a__: Optional[Any] = True return self.retrace_bidirectional_path( __UpperCamelCase , __UpperCamelCase) a__: int = current_bwd_node a__: Any = current_fwd_node a__: List[str] = { self.fwd_bfs: self.fwd_bfs.get_successors(__UpperCamelCase), self.bwd_bfs: self.bwd_bfs.get_successors(__UpperCamelCase), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(__UpperCamelCase) if not self.reached: return [self.fwd_bfs.start.pos] return None def lowerCamelCase_ ( self , lowercase , lowercase) -> Path: '''simple docstring''' a__: Union[str, Any] = self.fwd_bfs.retrace_path(__UpperCamelCase) a__: Optional[int] = self.bwd_bfs.retrace_path(__UpperCamelCase) bwd_path.pop() bwd_path.reverse() a__: Union[str, Any] = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() lowercase__ = (0, 0) lowercase__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowercase__ = time.time() lowercase__ = BreadthFirstSearch(init, goal) lowercase__ = bfs.search() lowercase__ = time.time() - start_bfs_time print('Unidirectional BFS computation time : ', bfs_time) lowercase__ = time.time() lowercase__ = BidirectionalBreadthFirstSearch(init, goal) lowercase__ = bd_bfs.search() lowercase__ = time.time() - 
start_bd_bfs_time print('Bidirectional BFS computation time : ', bd_bfs_time)
290
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins __A : int = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"] def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def lowercase ( _SCREAMING_SNAKE_CASE : Dict ): '''simple docstring''' config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = tmp_path_factory.getbasetemp() / '''cache''' _UpperCAmelCase = test_hf_cache_home / '''datasets''' _UpperCAmelCase = test_hf_cache_home / '''metrics''' _UpperCAmelCase = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_SCREAMING_SNAKE_CASE ) ) @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='''session''' ) def lowercase ( ): '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=_SCREAMING_SNAKE_CASE ) def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ): '''simple docstring''' monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , _SCREAMING_SNAKE_CASE ) @pytest.fixture def lowercase ( 
_SCREAMING_SNAKE_CASE : Union[str, Any] ): '''simple docstring''' monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , _SCREAMING_SNAKE_CASE )
260
0
def pancake_sort(arr):
    """Sort a list using pancake sort and return it.

    Repeatedly flips the prefix ending at the largest unsorted element to the
    front, then flips it into its final position.  (The obfuscated original
    mixed undefined names -- the body read `_SCREAMING_SNAKE_CASE`, `cur` and
    `arr` that were never bound, and the `__main__` block called a
    `pancake_sort` that was never defined; the conventional name is restored
    so the script is self-consistent.)

    :param arr: list of comparable items (not modified; a new list is built)
    :return: the sorted list

    >>> pancake_sort([3, 2, 1, 4])
    [1, 2, 3, 4]
    """
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi (brings the maximum to the front)
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix (drops the maximum into place)
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(pancake_sort(unsorted))
52
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list ): '''simple docstring''' if len(_SCREAMING_SNAKE_CASE ) <= 1: return lst _UpperCAmelCase = 1 while i < len(_SCREAMING_SNAKE_CASE ): if lst[i - 1] <= lst[i]: i += 1 else: _UpperCAmelCase , _UpperCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: _UpperCAmelCase = 1 return lst if __name__ == "__main__": __A : Dict = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(gnome_sort(unsorted))
260
0
def is_even(number: int) -> bool:
    """Return True when *number* is even, using a bitwise AND with 1.

    >>> is_even(4)
    True
    >>> is_even(7)
    False
    >>> is_even(0)
    True
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
43
"""simple docstring""" import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __A : int = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt") def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : float , _SCREAMING_SNAKE_CASE : int = 1_6000 ): '''simple docstring''' _UpperCAmelCase = int(round(sample_rate * max_length ) ) if len(_SCREAMING_SNAKE_CASE ) <= sample_length: return wav _UpperCAmelCase = randint(0 , len(_SCREAMING_SNAKE_CASE ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _a : """simple docstring""" UpperCamelCase__ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""}) UpperCamelCase__ = field( default="""train""" , metadata={ """help""": """The name of the 
training data set split to use (via the datasets library). Defaults to 'train'""" } , ) UpperCamelCase__ = field( default="""validation""" , metadata={ """help""": ( """The name of the training data set split to use (via the datasets library). Defaults to 'validation'""" ) } , ) UpperCamelCase__ = field( default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , ) UpperCamelCase__ = field( default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) UpperCamelCase__ = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class _a : """simple docstring""" UpperCamelCase__ = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""}) UpperCamelCase__ = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""}) UpperCamelCase__ = field( default=lowerCAmelCase , 
metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""}) UpperCamelCase__ = field( default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def lowercase__ ( self : Optional[Any] )->int: if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''will be removed in a future version. Use `--freeze_feature_encoder`''' '''instead. Setting `freeze_feature_encoder==True`.''' , __UpperCamelCase , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( '''The argument `--freeze_feature_extractor` is deprecated and ''' '''should not be used in combination with `--freeze_feature_encoder`.''' '''Only make use of `--freeze_feature_encoder`.''' ) def lowercase ( ): '''simple docstring''' _UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = parser.parse_args_into_dataclasses() # Sending telemetry. 
Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_audio_classification''' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _UpperCAmelCase = training_args.get_process_log_level() logger.setLevel(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} ' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _UpperCAmelCase = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _UpperCAmelCase = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' '''Use --overwrite_output_dir to train from scratch.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. 
To avoid this behavior, change ' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset and prepare it for the audio classification task. _UpperCAmelCase = DatasetDict() _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--audio_column_name` to the correct audio column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. ' '''Make sure to set `--label_column_name` to the correct text column - one of ''' f'{", ".join(raw_datasets["train"].column_names )}.' ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _UpperCAmelCase = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
_UpperCAmelCase = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _UpperCAmelCase = feature_extractor.model_input_names[0] def train_transforms(_SCREAMING_SNAKE_CASE : Tuple ): _UpperCAmelCase = [] for audio in batch[data_args.audio_column_name]: _UpperCAmelCase = random_subsample( audio['''array'''] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_SCREAMING_SNAKE_CASE : Optional[int] ): _UpperCAmelCase = [audio['''array'''] for audio in batch[data_args.audio_column_name]] _UpperCAmelCase = feature_extractor(_SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _UpperCAmelCase = {model_input_name: inputs.get(_SCREAMING_SNAKE_CASE )} _UpperCAmelCase = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _UpperCAmelCase = raw_datasets['''train'''].features[data_args.label_column_name].names _UpperCAmelCase , _UpperCAmelCase = {}, {} for i, label in enumerate(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = label # Load the accuracy metric from the datasets package _UpperCAmelCase = evaluate.load('''accuracy''' ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(_SCREAMING_SNAKE_CASE : List[str] ): _UpperCAmelCase = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=eval_pred.label_ids ) _UpperCAmelCase = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_SCREAMING_SNAKE_CASE ) , labelaid=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , finetuning_task='''audio-classification''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _UpperCAmelCase = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _UpperCAmelCase = ( raw_datasets['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) if training_args.do_eval: if data_args.max_eval_samples is not None: _UpperCAmelCase = ( raw_datasets['''eval'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_SCREAMING_SNAKE_CASE , output_all_columns=_SCREAMING_SNAKE_CASE ) # Initialize our trainer _UpperCAmelCase = Trainer( model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=raw_datasets['''train'''] if training_args.do_train else None , eval_dataset=raw_datasets['''eval'''] if 
training_args.do_eval else None , compute_metrics=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: _UpperCAmelCase = None if training_args.resume_from_checkpoint is not None: _UpperCAmelCase = training_args.resume_from_checkpoint elif last_checkpoint is not None: _UpperCAmelCase = last_checkpoint _UpperCAmelCase = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _UpperCAmelCase = trainer.evaluate() trainer.log_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) trainer.save_metrics('''eval''' , _SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub _UpperCAmelCase = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''audio-classification''', '''dataset''': data_args.dataset_name, '''tags''': ['''audio-classification'''], } if training_args.push_to_hub: trainer.push_to_hub(**_SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
from __future__ import annotations from collections import deque class A: '''simple docstring''' def __init__( self : Any , A_ : list[str] ) -> List[str]: """simple docstring""" lowerCamelCase_ = [] self.adlist.append( {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} ) for keyword in keywords: self.add_keyword(__UpperCamelCase ) self.set_fail_transitions() def a__ ( self : Any , A_ : int , A_ : str ) -> int | None: """simple docstring""" for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def a__ ( self : Union[str, Any] , A_ : str ) -> None: """simple docstring""" lowerCamelCase_ = 0 for character in keyword: lowerCamelCase_ = self.find_next_state(__UpperCamelCase , __UpperCamelCase ) if next_state is None: self.adlist.append( { 'value': character, 'next_states': [], 'fail_state': 0, 'output': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) lowerCamelCase_ = len(self.adlist ) - 1 else: lowerCamelCase_ = next_state self.adlist[current_state]["output"].append(__UpperCamelCase ) def a__ ( self : str ) -> None: """simple docstring""" lowerCamelCase_ = deque() for node in self.adlist[0]["next_states"]: q.append(__UpperCamelCase ) lowerCamelCase_ = 0 while q: lowerCamelCase_ = q.popleft() for child in self.adlist[r]["next_states"]: q.append(__UpperCamelCase ) lowerCamelCase_ = self.adlist[r]['fail_state'] while ( self.find_next_state(__UpperCamelCase , self.adlist[child]['value'] ) is None and state != 0 ): lowerCamelCase_ = self.adlist[state]['fail_state'] lowerCamelCase_ = self.find_next_state( __UpperCamelCase , self.adlist[child]['value'] ) if self.adlist[child]["fail_state"] is None: lowerCamelCase_ = 0 lowerCamelCase_ = ( self.adlist[child]['output'] + self.adlist[self.adlist[child]['fail_state']]['output'] ) def a__ ( self : Optional[Any] , A_ : str ) -> dict[str, list[int]]: """simple docstring""" lowerCamelCase_ = {} # returns a dict with 
keywords and list of its occurrences lowerCamelCase_ = 0 for i in range(len(__UpperCamelCase ) ): while ( self.find_next_state(__UpperCamelCase , string[i] ) is None and current_state != 0 ): lowerCamelCase_ = self.adlist[current_state]['fail_state'] lowerCamelCase_ = self.find_next_state(__UpperCamelCase , string[i] ) if next_state is None: lowerCamelCase_ = 0 else: lowerCamelCase_ = next_state for key in self.adlist[current_state]["output"]: if key not in result: lowerCamelCase_ = [] result[key].append(i - len(__UpperCamelCase ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
204
"""simple docstring""" import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = (DPMSolverSinglestepScheduler,) UpperCamelCase__ = (("""num_inference_steps""", 25),) def lowercase__ ( self : Tuple , **__UpperCamelCase : Tuple )->Any: _UpperCAmelCase = { '''num_train_timesteps''': 1_0_0_0, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''prediction_type''': '''epsilon''', '''thresholding''': False, '''sample_max_value''': 1.0, '''algorithm_type''': '''dpmsolver++''', '''solver_type''': '''midpoint''', '''lambda_min_clipped''': -float('''inf''' ), '''variance_type''': None, } config.update(**__UpperCamelCase ) return config def lowercase__ ( self : Dict , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : Optional[int] )->Tuple: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase , _UpperCAmelCase = sample, sample for t in 
range(__UpperCamelCase , time_step + scheduler.config.solver_order + 1 ): _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : Any )->Union[str, Any]: pass def lowercase__ ( self : str , __UpperCamelCase : Tuple=0 , **__UpperCamelCase : List[Any] )->Dict: _UpperCAmelCase = dict(self.forward_default_kwargs ) _UpperCAmelCase = kwargs.pop('''num_inference_steps''' , __UpperCamelCase ) _UpperCAmelCase = self.dummy_sample _UpperCAmelCase = 0.1 * sample _UpperCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: _UpperCAmelCase = self.get_scheduler_config() _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__UpperCamelCase ) _UpperCAmelCase = scheduler_class.from_pretrained(__UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(__UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) _UpperCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order] _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample _UpperCAmelCase = new_scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowercase__ ( self : int , __UpperCamelCase : List[str]=None , **__UpperCamelCase : 
Optional[int] )->List[Any]: if scheduler is None: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(**__UpperCamelCase ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample return sample def lowercase__ ( self : List[Any] )->Dict: _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = 5_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter scheduler.set_timesteps(__UpperCamelCase ) # make sure that the first t is uneven for i, t in enumerate(scheduler.timesteps[3:] ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_5_7_4 ) < 1e-3 def lowercase__ ( self : Dict )->Dict: for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_configs(num_train_timesteps=__UpperCamelCase ) def lowercase__ ( self : str )->Optional[Any]: # make sure that iterating over schedulers with same config names gives same results # for defaults _UpperCAmelCase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 _UpperCAmelCase = DEISMultistepScheduler.from_config(scheduler.config ) 
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config ) _UpperCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config ) _UpperCAmelCase = self.full_loop(scheduler=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->int: self.check_over_configs(thresholding=__UpperCamelCase ) for order in [1, 2, 3]: for solver_type in ["midpoint", "heun"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , algorithm_type='''dpmsolver++''' , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , ) def lowercase__ ( self : str )->str: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCamelCase ) def lowercase__ ( self : List[Any] )->Tuple: for algorithm_type in ["dpmsolver", "dpmsolver++"]: for solver_type in ["midpoint", "heun"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) _UpperCAmelCase = self.full_loop( solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , prediction_type=__UpperCamelCase , algorithm_type=__UpperCamelCase , ) assert not torch.isnan(__UpperCamelCase ).any(), "Samples have nan numbers" def lowercase__ ( self : Dict )->List[str]: self.check_over_configs(lower_order_final=__UpperCamelCase ) self.check_over_configs(lower_order_final=__UpperCamelCase ) def lowercase__ ( self : Dict )->str: self.check_over_configs(lambda_min_clipped=-float('''inf''' ) ) self.check_over_configs(lambda_min_clipped=-5.1 ) def lowercase__ ( self : List[str] )->int: 
self.check_over_configs(variance_type=__UpperCamelCase ) self.check_over_configs(variance_type='''learned_range''' ) def lowercase__ ( self : List[str] )->Union[str, Any]: for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]: self.check_over_forward(num_inference_steps=__UpperCamelCase , time_step=0 ) def lowercase__ ( self : List[Any] )->int: _UpperCAmelCase = self.full_loop() _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_7_9_1 ) < 1e-3 def lowercase__ ( self : List[str] )->List[str]: _UpperCAmelCase = self.full_loop(use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.2_2_4_8 ) < 1e-3 def lowercase__ ( self : int )->List[Any]: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.1_4_5_3 ) < 1e-3 def lowercase__ ( self : Optional[Any] )->Dict: _UpperCAmelCase = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=__UpperCamelCase ) _UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) ) assert abs(result_mean.item() - 0.0_6_4_9 ) < 1e-3 def lowercase__ ( self : Union[str, Any] )->List[str]: _UpperCAmelCase = self.scheduler_classes[0] _UpperCAmelCase = self.get_scheduler_config(thresholding=__UpperCamelCase , dynamic_thresholding_ratio=0 ) _UpperCAmelCase = scheduler_class(**__UpperCamelCase ) _UpperCAmelCase = 1_0 _UpperCAmelCase = self.dummy_model() _UpperCAmelCase = self.dummy_sample_deter.half() scheduler.set_timesteps(__UpperCamelCase ) for i, t in enumerate(scheduler.timesteps ): _UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ) _UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample assert sample.dtype == torch.floataa
260
0
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = ArgumentParser( description=( """PyTorch TPU distributed training launch """ """helper utility that will spawn up """ """multiple distributed processes""" ) ) # Optional arguments for the launch helper parser.add_argument("""--num_cores""" , type=_SCREAMING_SNAKE_CASE , default=1 , help="""Number of TPU cores to use (1 or 8).""" ) # positional parser.add_argument( """training_script""" , type=_SCREAMING_SNAKE_CASE , help=( """The full path to the single TPU training """ """program/script to be launched in parallel, """ """followed by all the arguments for the """ """training script""" ) , ) # rest from the training program parser.add_argument("""training_script_args""" , nargs=_SCREAMING_SNAKE_CASE ) return parser.parse_args() def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = parse_args() # Import training_script as a module. SCREAMING_SNAKE_CASE__ = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) SCREAMING_SNAKE_CASE__ = script_fpath.stem SCREAMING_SNAKE_CASE__ = importlib.import_module(_SCREAMING_SNAKE_CASE ) # Patch sys.argv SCREAMING_SNAKE_CASE__ = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
165
"""simple docstring""" from __future__ import annotations from math import pi from typing import Protocol import matplotlib.pyplot as plt import numpy as np class _a ( lowerCAmelCase): """simple docstring""" def lowercase__ ( self : List[Any] , __UpperCamelCase : float )->float: return 0.0 def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] ) _UpperCAmelCase = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] ) return lowest, highest def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.abs(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) _UpperCAmelCase = 20 * np.logaa(_SCREAMING_SNAKE_CASE ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) # Display within reasonable bounds _UpperCAmelCase = get_bounds(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) ) plt.ylabel('''Gain (dB)''' ) plt.plot(_SCREAMING_SNAKE_CASE ) plt.show() def lowercase ( _SCREAMING_SNAKE_CASE : FilterType , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = 512 _UpperCAmelCase = [1] + [0] * (size - 1) _UpperCAmelCase = [filter_type.process(_SCREAMING_SNAKE_CASE ) for item in inputs] _UpperCAmelCase = [0] * (samplerate - size) # zero-padding outputs += filler _UpperCAmelCase = np.angle(np.fft.fft(_SCREAMING_SNAKE_CASE ) ) # Frequencies on log scale from 24 to nyquist frequency plt.xlim(24 , samplerate / 2 - 1 ) plt.xlabel('''Frequency (Hz)''' ) plt.xscale('''log''' ) plt.ylim(-2 * pi , 2 * pi ) 
plt.ylabel('''Phase shift (Radians)''' ) plt.plot(np.unwrap(_SCREAMING_SNAKE_CASE , -2 * pi ) ) plt.show()
260
0
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int: if len(_SCREAMING_SNAKE_CASE ) <= 1: return lst lowerCAmelCase = 1 while i < len(_SCREAMING_SNAKE_CASE ): if lst[i - 1] <= lst[i]: i += 1 else: lowerCAmelCase , lowerCAmelCase = lst[i], lst[i - 1] i -= 1 if i == 0: lowerCAmelCase = 1 return lst if __name__ == "__main__": lowercase__ : Dict = input('''Enter numbers separated by a comma:\n''').strip() lowercase__ : List[Any] = [int(item) for item in user_input.split(''',''')] print(gnome_sort(unsorted))
338
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : Dict = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """camembert""" def __init__( self : List[str] , __UpperCamelCase : Union[str, Any]=3_0_5_2_2 , __UpperCamelCase : Optional[Any]=7_6_8 , __UpperCamelCase : Optional[int]=1_2 , __UpperCamelCase : Union[str, Any]=1_2 , __UpperCamelCase : List[Any]=3_0_7_2 , __UpperCamelCase : Dict="gelu" , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : int=0.1 , __UpperCamelCase : int=5_1_2 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-12 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any="absolute" , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=None , **__UpperCamelCase : Optional[Any] , )->str: super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = hidden_act _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = initializer_range _UpperCAmelCase = layer_norm_eps _UpperCAmelCase = 
position_embedding_type _UpperCAmelCase = use_cache _UpperCAmelCase = classifier_dropout class _a ( lowerCAmelCase): """simple docstring""" @property def lowercase__ ( self : int )->Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: _UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ] )
260
0
'''simple docstring''' from bisect import bisect from itertools import accumulate def snake_case_ (_a : Dict , _a : Optional[Any] , _a : Optional[int] , _a : Optional[Any] ): UpperCAmelCase = sorted(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , key=lambda _a : x[0] / x[1] , reverse=_SCREAMING_SNAKE_CASE ) UpperCAmelCase , UpperCAmelCase = [i[0] for i in r], [i[1] for i in r] UpperCAmelCase = list(accumulate(_SCREAMING_SNAKE_CASE ) ) UpperCAmelCase = bisect(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
34
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A : Tuple = logging.get_logger(__name__) __A : List[str] = { "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = """poolformer""" def __init__( self : List[str] , __UpperCamelCase : int=3 , __UpperCamelCase : List[Any]=1_6 , __UpperCamelCase : str=1_6 , __UpperCamelCase : List[Any]=3 , __UpperCamelCase : int=4.0 , __UpperCamelCase : str=[2, 2, 6, 2] , __UpperCamelCase : Tuple=[6_4, 1_2_8, 3_2_0, 5_1_2] , __UpperCamelCase : int=[7, 3, 3, 3] , __UpperCamelCase : str=[4, 2, 2, 2] , __UpperCamelCase : Union[str, Any]=[2, 1, 1, 1] , __UpperCamelCase : List[str]=4 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=1e-5 , __UpperCamelCase : str=0.0_2 , **__UpperCamelCase : List[Any] , )->Dict: _UpperCAmelCase = num_channels _UpperCAmelCase = patch_size _UpperCAmelCase = stride _UpperCAmelCase = padding _UpperCAmelCase = pool_size _UpperCAmelCase = hidden_sizes _UpperCAmelCase = mlp_ratio _UpperCAmelCase = depths _UpperCAmelCase = patch_sizes _UpperCAmelCase = strides _UpperCAmelCase = num_encoder_blocks _UpperCAmelCase = drop_path_rate _UpperCAmelCase = hidden_act _UpperCAmelCase = use_layer_scale _UpperCAmelCase = layer_scale_init_value _UpperCAmelCase = initializer_range super().__init__(**__UpperCamelCase ) class _a ( lowerCAmelCase): """simple docstring""" UpperCamelCase__ = version.parse("""1.11""") @property def lowercase__ ( self : Union[str, Any] )->Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: 
'''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def lowercase__ ( self : Tuple )->float: return 2e-3
260
0
"""simple docstring""" import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class UpperCamelCase ( lowercase ): def __init__(self : Dict , _A : Optional[Any] , _A : str=13 , _A : Optional[int]=7 , _A : Optional[int]=True , _A : Optional[Any]=True , _A : Optional[int]=False , _A : Any=True , _A : Union[str, Any]=99 , _A : Tuple=32 , _A : int=5 , _A : List[Any]=4 , _A : Optional[Any]=64 , _A : Any="gelu" , _A : Dict=0.1 , _A : int=0.1 , _A : Union[str, Any]=5_12 , _A : Optional[Any]=16 , _A : Optional[Any]=2 , _A : Union[str, Any]=0.02 , _A : str=3 , _A : Union[str, Any]=4 , _A : List[str]=None , _A : Any=2 , _A : Dict=2 , _A : List[str]=2 , _A : str=2 , _A : str=4 , _A : Dict=1 , ) -> Tuple: __snake_case : List[str] = parent __snake_case : List[Any] = batch_size __snake_case : Tuple = seq_length __snake_case : str = is_training __snake_case : Dict = use_input_mask __snake_case : str = use_token_type_ids __snake_case : Any = use_labels __snake_case : List[Any] = vocab_size __snake_case : List[Any] = hidden_size __snake_case : List[Any] = num_hidden_layers __snake_case : List[Any] = num_attention_heads __snake_case : List[str] = intermediate_size __snake_case : str = hidden_act __snake_case : str = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : Any = max_position_embeddings __snake_case : Union[str, Any] = 
type_vocab_size __snake_case : Dict = type_sequence_label_size __snake_case : int = initializer_range __snake_case : Union[str, Any] = num_labels __snake_case : Optional[Any] = num_choices __snake_case : Any = scope __snake_case : List[Any] = q_groups __snake_case : str = k_groups __snake_case : str = v_groups __snake_case : int = post_attention_groups __snake_case : Optional[Any] = intermediate_groups __snake_case : Union[str, Any] = output_groups def _lowercase (self : Union[str, Any]) -> List[Any]: __snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) __snake_case : Optional[Any] = None if self.use_input_mask: __snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length]) __snake_case : Dict = None __snake_case : Dict = None __snake_case : List[str] = None if self.use_labels: __snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) __snake_case : int = ids_tensor([self.batch_size] , self.num_choices) __snake_case : Optional[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase (self : Optional[Any]) -> List[Any]: return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def _lowercase 
(self : Optional[Any] , _A : Union[str, Any] , _A : Dict , _A : Dict , _A : str , _A : str , _A : Any) -> Optional[Any]: __snake_case : Any = SqueezeBertModel(config=__UpperCamelCase) model.to(__UpperCamelCase) model.eval() __snake_case : Dict = model(__UpperCamelCase , __UpperCamelCase) __snake_case : Optional[int] = model(__UpperCamelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _lowercase (self : Optional[Any] , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : int , _A : str , _A : Tuple) -> Tuple: __snake_case : List[str] = SqueezeBertForMaskedLM(config=__UpperCamelCase) model.to(__UpperCamelCase) model.eval() __snake_case : str = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _lowercase (self : List[Any] , _A : Dict , _A : List[Any] , _A : Optional[Any] , _A : Optional[Any] , _A : str , _A : Tuple) -> Optional[Any]: __snake_case : Union[str, Any] = SqueezeBertForQuestionAnswering(config=__UpperCamelCase) model.to(__UpperCamelCase) model.eval() __snake_case : Dict = model( __UpperCamelCase , attention_mask=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _lowercase (self : Any , _A : Union[str, Any] , _A : Tuple , _A : List[Any] , _A : Any , _A : Union[str, Any] , _A : Optional[Any]) -> Optional[int]: __snake_case : Dict = self.num_labels __snake_case : Union[str, Any] = SqueezeBertForSequenceClassification(__UpperCamelCase) model.to(__UpperCamelCase) model.eval() __snake_case : Optional[int] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase) self.parent.assertEqual(result.logits.shape 
, (self.batch_size, self.num_labels)) def _lowercase (self : Optional[Any] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Optional[int] , _A : Tuple , _A : Tuple) -> Optional[Any]: __snake_case : str = self.num_labels __snake_case : Optional[int] = SqueezeBertForTokenClassification(config=__UpperCamelCase) model.to(__UpperCamelCase) model.eval() __snake_case : Optional[Any] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _lowercase (self : List[Any] , _A : Optional[int] , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : int) -> Optional[Any]: __snake_case : Union[str, Any] = self.num_choices __snake_case : Optional[int] = SqueezeBertForMultipleChoice(config=__UpperCamelCase) model.to(__UpperCamelCase) model.eval() __snake_case : Optional[int] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __snake_case : List[str] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() __snake_case : Dict = model( __UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _lowercase (self : Union[str, Any]) -> Tuple: __snake_case : List[Any] = self.prepare_config_and_inputs() ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) : Optional[Any] = config_and_inputs __snake_case : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase , lowercase , unittest.TestCase ): UpperCAmelCase : str = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) UpperCAmelCase : 
int = ( { """feature-extraction""": SqueezeBertModel, """fill-mask""": SqueezeBertForMaskedLM, """question-answering""": SqueezeBertForQuestionAnswering, """text-classification""": SqueezeBertForSequenceClassification, """token-classification""": SqueezeBertForTokenClassification, """zero-shot""": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase : List[Any] = False UpperCAmelCase : int = True UpperCAmelCase : List[Any] = False def _lowercase (self : Dict) -> int: __snake_case : Any = SqueezeBertModelTester(self) __snake_case : Tuple = ConfigTester(self , config_class=__UpperCamelCase , dim=37) def _lowercase (self : List[Any]) -> List[Any]: self.config_tester.run_common_tests() def _lowercase (self : Optional[Any]) -> Tuple: __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*__UpperCamelCase) def _lowercase (self : Tuple) -> str: __snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*__UpperCamelCase) def _lowercase (self : Dict) -> Union[str, Any]: __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*__UpperCamelCase) def _lowercase (self : Any) -> Any: __snake_case : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__UpperCamelCase) def _lowercase (self : Union[str, Any]) -> List[Any]: __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*__UpperCamelCase) def _lowercase (self : List[str]) -> int: __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__UpperCamelCase) @slow def _lowercase (self : Optional[Any]) -> Dict: for 
model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Dict = SqueezeBertModel.from_pretrained(__UpperCamelCase) self.assertIsNotNone(__UpperCamelCase) @require_sentencepiece @require_tokenizers @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _lowercase (self : List[str]) -> Optional[int]: __snake_case : int = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli') __snake_case : Any = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]]) __snake_case : Tuple = model(__UpperCamelCase)[0] __snake_case : int = torch.Size((1, 3)) self.assertEqual(output.shape , __UpperCamelCase) __snake_case : Optional[int] = torch.tensor([[0.6_401, -0.0_349, -0.6_041]]) self.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1E-4))
172
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __A : Union[str, Any] = 16 __A : Optional[Any] = 32 def lowercase ( _SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ): '''simple docstring''' _UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCAmelCase = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(_SCREAMING_SNAKE_CASE : Optional[int] ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # 
starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase = datasets.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(_SCREAMING_SNAKE_CASE : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase = 8 else: _UpperCAmelCase = None return tokenizer.pad( _SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' , ) # Instantiate dataloaders. 
_UpperCAmelCase = DataLoader( tokenized_datasets['''train'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = DataLoader( tokenized_datasets['''validation'''] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders __A : Optional[int] = mocked_dataloaders # noqa: F811 def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , _SCREAMING_SNAKE_CASE ) == "1": _UpperCAmelCase = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _UpperCAmelCase = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase = config['''lr'''] _UpperCAmelCase = int(config['''num_epochs'''] ) _UpperCAmelCase = int(config['''seed'''] ) _UpperCAmelCase = int(config['''batch_size'''] ) set_seed(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase , _UpperCAmelCase = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase = MAX_GPU_BATCH_SIZE # 
Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=_SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE ) # Instantiate scheduler _UpperCAmelCase = get_linear_schedule_with_warmup( optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: _UpperCAmelCase = os.path.split(_SCREAMING_SNAKE_CASE )[-1].split('''.''' )[0] accelerator.init_trackers(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(_SCREAMING_SNAKE_CASE ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _UpperCAmelCase = 0 for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _UpperCAmelCase = loss / gradient_accumulation_steps accelerator.backward(_SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase = model(**_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , ) _UpperCAmelCase = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'epoch {epoch}:' , _SCREAMING_SNAKE_CASE ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(_SCREAMING_SNAKE_CASE ), '''epoch''': epoch, } , step=_SCREAMING_SNAKE_CASE , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase ( ): '''simple docstring''' _UpperCAmelCase = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. 
Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=_SCREAMING_SNAKE_CASE , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _UpperCAmelCase = parser.parse_args() _UpperCAmelCase = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
260
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : List[Any] = logging.get_logger(__name__) _lowerCamelCase : Any = { "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json", "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json" # See all FNet models at https://huggingface.co/models?filter=fnet } class __UpperCAmelCase ( A__ ): '''simple docstring''' __lowerCAmelCase = '''fnet''' def __init__(self : Optional[int] , _lowerCAmelCase : int=3_2000 , _lowerCAmelCase : Dict=768 , _lowerCAmelCase : int=12 , _lowerCAmelCase : List[str]=3072 , _lowerCAmelCase : int="gelu_new" , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : str=512 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : Any=0.02 , _lowerCAmelCase : List[Any]=1e-12 , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Dict=512 , _lowerCAmelCase : int=3 , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : int=2 , **_lowerCAmelCase : Optional[Any] , ): super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase ) A = vocab_size A = max_position_embeddings A = hidden_size A = num_hidden_layers A = intermediate_size A = hidden_act A = hidden_dropout_prob A = initializer_range A = type_vocab_size A = layer_norm_eps A = use_tpu_fourier_optimizations A = tpu_short_seq_length
258
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : set ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ), len(grid[0] ) if ( min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _UpperCAmelCase = 0 count += depth_first_search(_SCREAMING_SNAKE_CASE , row + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , row - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col + 1 , _SCREAMING_SNAKE_CASE ) count += depth_first_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , col - 1 , _SCREAMING_SNAKE_CASE ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
260
0
def merge_sort(collection):
    """Return a sorted copy of *collection* using top-down merge sort.

    The sort is stable: on ties the element from the left half is yielded
    first (``<=`` comparison in ``_merge``).
    """

    def merge(left, right) -> list:
        """Merge two already-sorted lists into one sorted list."""

        def _merge():
            # Pop the smaller head until one side is exhausted, then drain the rest.
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    # A list of zero or one element is already sorted.
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
82
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def lowercase ( _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def lowercase ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple ): '''simple docstring''' _UpperCAmelCase = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue _UpperCAmelCase = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) _UpperCAmelCase = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) _UpperCAmelCase = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) _UpperCAmelCase = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) _UpperCAmelCase = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) _UpperCAmelCase = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) _UpperCAmelCase = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) _UpperCAmelCase = key.replace('''image_encoder.module''' , '''flava.image_model''' ) _UpperCAmelCase = key.replace('''text_encoder.module''' , '''flava.text_model''' ) _UpperCAmelCase = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) _UpperCAmelCase = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) _UpperCAmelCase = key.replace('''text_projection''' , '''flava.text_projection''' ) _UpperCAmelCase = key.replace('''image_projection''' , 
'''flava.image_projection''' ) _UpperCAmelCase = value.float() for key, value in codebook_state_dict.items(): _UpperCAmelCase = value return upgrade @torch.no_grad() def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int]=None ): '''simple docstring''' if config_path is not None: _UpperCAmelCase = FlavaConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) else: _UpperCAmelCase = FlavaConfig() _UpperCAmelCase = FlavaForPreTraining(_SCREAMING_SNAKE_CASE ).eval() _UpperCAmelCase = convert_dalle_checkpoint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , save_checkpoint=_SCREAMING_SNAKE_CASE ) if os.path.exists(_SCREAMING_SNAKE_CASE ): _UpperCAmelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) else: _UpperCAmelCase = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location='''cpu''' ) _UpperCAmelCase = upgrade_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) hf_model.load_state_dict(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = hf_model.state_dict() _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = count_parameters(_SCREAMING_SNAKE_CASE ) + count_parameters(_SCREAMING_SNAKE_CASE ) assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ) hf_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __A : Dict = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint") parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") __A : Optional[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, 
args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
260
0
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed __A : Optional[int] = "true" def lowercase ( __snake_case : Optional[int] , __snake_case : List[str]=8_2 , __snake_case : Union[str, Any]=1_6 ): set_seed(4_2 ) lowercase_ : List[str] = RegressionModel() lowercase_ : List[Any] = deepcopy(_SCREAMING_SNAKE_CASE ) lowercase_ : Dict = RegressionDataset(length=_SCREAMING_SNAKE_CASE ) lowercase_ : Dict = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) model.to(accelerator.device ) lowercase_ , lowercase_ : List[Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return model, ddp_model, dataloader def lowercase ( __snake_case : Accelerator , __snake_case : Tuple=False ): lowercase_ : str = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' ) lowercase_ : str = load_dataset('''glue''' , '''mrpc''' , split='''validation''' ) def tokenize_function(__snake_case : Union[str, Any] ): lowercase_ : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE ) return outputs with accelerator.main_process_first(): lowercase_ : Any = dataset.map( _SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) lowercase_ : Optional[int] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(__snake_case : Optional[Any] ): if use_longest: return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='''longest''' , return_tensors='''pt''' ) return 
tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' ) return DataLoader(_SCREAMING_SNAKE_CASE , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=1_6 ) def lowercase ( __snake_case : List[Any] , __snake_case : Optional[int] ): lowercase_ : List[Any] = Accelerator(dispatch_batches=_SCREAMING_SNAKE_CASE , split_batches=_SCREAMING_SNAKE_CASE ) lowercase_ : int = get_dataloader(_SCREAMING_SNAKE_CASE , not dispatch_batches ) lowercase_ : Any = AutoModelForSequenceClassification.from_pretrained( '''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=_SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : Union[str, Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def lowercase ( __snake_case : List[Any] , __snake_case : Union[str, Any] , __snake_case : int ): lowercase_ : Dict = [] for batch in dataloader: lowercase_ , lowercase_ : Union[str, Any] = batch.values() with torch.no_grad(): lowercase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : Tuple = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) lowercase_ , lowercase_ : List[str] = [], [] for logit, targ in logits_and_targets: logits.append(_SCREAMING_SNAKE_CASE ) targs.append(_SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : Optional[int] = torch.cat(_SCREAMING_SNAKE_CASE ), torch.cat(_SCREAMING_SNAKE_CASE ) return logits, targs def lowercase ( __snake_case : Accelerator , __snake_case : Dict=8_2 , __snake_case : Union[str, Any]=False , __snake_case : str=False , __snake_case : Union[str, Any]=1_6 ): lowercase_ , lowercase_ , lowercase_ : Tuple = get_basic_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : str = generate_predictions(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) assert ( len(_SCREAMING_SNAKE_CASE ) == num_samples ), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_SCREAMING_SNAKE_CASE )}''' def lowercase ( __snake_case : bool = False , __snake_case : bool = False ): lowercase_ : List[str] = evaluate.load('''glue''' , '''mrpc''' ) lowercase_ , lowercase_ : List[Any] = get_mrpc_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # First do baseline lowercase_ , lowercase_ , lowercase_ : Tuple = setup['''no'''] model.to(_SCREAMING_SNAKE_CASE ) model.eval() for batch in dataloader: batch.to(_SCREAMING_SNAKE_CASE ) with torch.inference_mode(): lowercase_ : Any = model(**_SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=_SCREAMING_SNAKE_CASE , references=batch['''labels'''] ) lowercase_ : int = metric.compute() # Then do distributed lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = setup['''ddp'''] model.eval() for batch in dataloader: with torch.inference_mode(): lowercase_ : List[Any] = model(**_SCREAMING_SNAKE_CASE ) lowercase_ : Any = outputs.logits.argmax(dim=-1 ) lowercase_ : Union[str, Any] = batch['''labels'''] lowercase_ , lowercase_ : Dict = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n''' def lowercase ( ): lowercase_ : Any = Accelerator(split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() 
transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print('''**Testing gather_for_metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' ) test_mrpc(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test torch metrics**''' ) for split_batches in [True, False]: for dispatch_batches in [True, False]: lowercase_ : Dict = Accelerator(split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE ) if accelerator.is_local_main_process: print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' ) test_torch_metrics(_SCREAMING_SNAKE_CASE , 9_9 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print('''**Test last batch is not dropped when perfectly divisible**''' ) lowercase_ : int = Accelerator() test_torch_metrics(_SCREAMING_SNAKE_CASE , 5_1_2 ) accelerator.state._reset_state() def lowercase ( __snake_case : Any ): main() if __name__ == "__main__": main()
33
"""simple docstring""" import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def lowercase ( _SCREAMING_SNAKE_CASE : Features ): '''simple docstring''' _UpperCAmelCase = np.inf def set_batch_size(_SCREAMING_SNAKE_CASE : FeatureType ) -> None: nonlocal batch_size if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and feature.dtype == "binary": _UpperCAmelCase = min(_SCREAMING_SNAKE_CASE , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return None if batch_size is np.inf else batch_size class _a ( lowerCAmelCase): """simple docstring""" def __init__( self : Optional[Any] , __UpperCamelCase : NestedDataStructureLike[PathLike] , __UpperCamelCase : Optional[NamedSplit] = None , __UpperCamelCase : Optional[Features] = None , __UpperCamelCase : str = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : int , )->Union[str, Any]: super().__init__( __UpperCamelCase , split=__UpperCamelCase , features=__UpperCamelCase , cache_dir=__UpperCamelCase , keep_in_memory=__UpperCamelCase , streaming=__UpperCamelCase , num_proc=__UpperCamelCase , **__UpperCamelCase , ) 
_UpperCAmelCase = path_or_paths if isinstance(__UpperCamelCase , __UpperCamelCase ) else {self.split: path_or_paths} _UpperCAmelCase = _PACKAGED_DATASETS_MODULES['''parquet'''][1] _UpperCAmelCase = Parquet( cache_dir=__UpperCamelCase , data_files=__UpperCamelCase , features=__UpperCamelCase , hash=__UpperCamelCase , **__UpperCamelCase , ) def lowercase__ ( self : Union[str, Any] )->Dict: # Build iterable dataset if self.streaming: _UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None self.builder.download_and_prepare( download_config=__UpperCamelCase , download_mode=__UpperCamelCase , verification_mode=__UpperCamelCase , base_path=__UpperCamelCase , num_proc=self.num_proc , ) _UpperCAmelCase = self.builder.as_dataset( split=self.split , verification_mode=__UpperCamelCase , in_memory=self.keep_in_memory ) return dataset class _a : """simple docstring""" def __init__( self : Optional[int] , __UpperCamelCase : Dataset , __UpperCamelCase : Union[PathLike, BinaryIO] , __UpperCamelCase : Optional[int] = None , **__UpperCamelCase : Tuple , )->Optional[int]: _UpperCAmelCase = dataset _UpperCAmelCase = path_or_buf _UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features ) _UpperCAmelCase = parquet_writer_kwargs def lowercase__ ( self : Optional[int] )->int: _UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , '''wb+''' ) as buffer: _UpperCAmelCase = self._write(file_obj=__UpperCamelCase , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) else: _UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__UpperCamelCase , **self.parquet_writer_kwargs ) return written def lowercase__ ( self : int , __UpperCamelCase : BinaryIO , __UpperCamelCase : int , **__UpperCamelCase 
: int )->int: _UpperCAmelCase = 0 _UpperCAmelCase = parquet_writer_kwargs.pop('''path_or_buf''' , __UpperCamelCase ) _UpperCAmelCase = self.dataset.features.arrow_schema _UpperCAmelCase = pq.ParquetWriter(__UpperCamelCase , schema=__UpperCamelCase , **__UpperCamelCase ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __UpperCamelCase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ): _UpperCAmelCase = query_table( table=self.dataset._data , key=slice(__UpperCamelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__UpperCamelCase ) written += batch.nbytes writer.close() return written
260
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json", "umberto-commoncrawl-cased-v1": ( "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json" ), "umberto-wikipedia-uncased-v1": ( "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json" ), } class __snake_case ( __lowerCAmelCase ): a__ = """camembert""" def __init__( self , lowercase=3_05_22 , lowercase=7_68 , lowercase=12 , lowercase=12 , lowercase=30_72 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_12 , lowercase=2 , lowercase=0.02 , lowercase=1e-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ) -> str: '''simple docstring''' super().__init__(pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase , **__UpperCamelCase) a__: List[Any] = vocab_size a__: Tuple = hidden_size a__: str = num_hidden_layers a__: Union[str, Any] = num_attention_heads a__: int = hidden_act a__: Union[str, Any] = intermediate_size a__: Optional[Any] = hidden_dropout_prob a__: Union[str, Any] = attention_probs_dropout_prob a__: Tuple = max_position_embeddings a__: Dict = type_vocab_size a__: List[Any] = initializer_range a__: Tuple = layer_norm_eps a__: Union[str, Any] = position_embedding_type a__: Optional[Any] = use_cache a__: str = classifier_dropout class __snake_case ( __lowerCAmelCase ): @property def lowerCamelCase_ ( self) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": a__: Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: a__: Tuple = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', 
dynamic_axis), ('attention_mask', dynamic_axis), ])
290
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str = " " ): '''simple docstring''' _UpperCAmelCase = [] _UpperCAmelCase = 0 for index, char in enumerate(_SCREAMING_SNAKE_CASE ): if char == separator: split_words.append(string[last_index:index] ) _UpperCAmelCase = index + 1 elif index + 1 == len(_SCREAMING_SNAKE_CASE ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
260
0
import os


def solution() -> str:
    """Return the first ten digits of the sum of the numbers in ``num.txt``.

    (Project Euler problem 13 — the data file lives next to this script.)
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
52
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase ( _SCREAMING_SNAKE_CASE : Any ): '''simple docstring''' _UpperCAmelCase = args.pruning_method _UpperCAmelCase = args.threshold _UpperCAmelCase = args.model_name_or_path.rstrip('''/''' ) _UpperCAmelCase = args.target_model_path print(f'Load fine-pruned model from {model_name_or_path}' ) _UpperCAmelCase = torch.load(os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) _UpperCAmelCase = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "classifier" in name or "qa_output" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) elif "bias" in name: _UpperCAmelCase = tensor print(f'Copied layer {name}' ) else: if pruning_method == "magnitude": _UpperCAmelCase = MagnitudeBinarizer.apply(inputs=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "topK": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = TopKBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase = ThresholdBinarizer.apply(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) elif pruning_method == "l0": if "mask_scores" in name: continue _UpperCAmelCase = name[:-6] _UpperCAmelCase = model[f'{prefix_}mask_scores'] _UpperCAmelCase , _UpperCAmelCase = -0.1, 1.1 _UpperCAmelCase = torch.sigmoid(_SCREAMING_SNAKE_CASE ) 
_UpperCAmelCase = s * (r - l) + l _UpperCAmelCase = s_bar.clamp(min=0.0 , max=1.0 ) _UpperCAmelCase = tensor * mask print(f'Pruned layer {name}' ) else: raise ValueError('''Unknown pruning method''' ) if target_model_path is None: _UpperCAmelCase = os.path.join( os.path.dirname(_SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(_SCREAMING_SNAKE_CASE )}' ) if not os.path.isdir(_SCREAMING_SNAKE_CASE ): shutil.copytree(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print(f'\nCreated folder {target_model_path}' ) torch.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) ) print('''\nPruned model saved! See you later!''' ) if __name__ == "__main__": __A : Tuple = argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." "Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __A : Optional[int] = parser.parse_args() main(args)
260
0
from __future__ import annotations


def solution(maze):
    """Solve *maze* from (0,0) to the bottom-right corner; print and return the result.

    Cells containing 1 are walls. Returns True when a path exists.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze, i, j, solutions):
    """Backtracking step: try to extend the path through cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            # Dead end: unmark this cell and backtrack.
            solutions[i][j] = 0
            return False
        return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
43
"""simple docstring""" def lowercase ( _SCREAMING_SNAKE_CASE : List[Any] ): '''simple docstring''' _UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) while cur > 1: # Find the maximum number in arr _UpperCAmelCase = arr.index(max(arr[0:cur] ) ) # Reverse from 0 to mi _UpperCAmelCase = arr[mi::-1] + arr[mi + 1 : len(_SCREAMING_SNAKE_CASE )] # Reverse whole list _UpperCAmelCase = arr[cur - 1 :: -1] + arr[cur : len(_SCREAMING_SNAKE_CASE )] cur -= 1 return arr if __name__ == "__main__": __A : List[str] = input("Enter numbers separated by a comma:\n").strip() __A : List[Any] = [int(item) for item in user_input.split(",")] print(pancake_sort(unsorted))
260
0
from typing import Any


def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states (Viterbi algorithm).

    :param observations_space: ordered list of observed symbols
    :param states_space: list of hidden state names
    :param initial_probabilities: state -> P(state at t=0)
    :param transition_probabilities: state -> {state -> P(transition)}
    :param emission_probabilities: state -> {observation -> P(emission)}
    :return: most probable hidden-state sequence, one entry per observation
    :raises ValueError: if any argument is empty or has the wrong structure
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # probabilities[(state, observation)] = best path probability ending in
    # `state` when `observation` is seen; pointers stores the argmax
    # predecessor so the path can be reconstructed afterwards.
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fill the tables for every later observation.
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # argmax over the possible predecessor states
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Record the winning probability and a pointer to its predecessor.
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # argmax over states for the final observation
    final_observation = observations_space[len(observations_space) - 1]
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Follow the pointers backwards, then reverse into chronological order.
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate all viterbi() arguments, raising ValueError on bad input."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Raise ValueError if any of the parameters is empty/falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Validate that both spaces are lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """Raise ValueError unless ``_object`` is a list of strings."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Validate the three probability tables of viterbi()."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Validate a dict of dicts mapping str -> (str -> float)."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    """Raise ValueError unless ``_object`` maps str keys to ``value_type`` values."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(
            f"{var_name} {nested_text}all values must be {value_type.__name__}"
        )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
204
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray , _SCREAMING_SNAKE_CASE : np.ndarray ): '''simple docstring''' _UpperCAmelCase = np.zeros_like(_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image _UpperCAmelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): _UpperCAmelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() _UpperCAmelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image __A : str = Path(__file__).resolve().parent / "image_data" / "lena.jpg" __A : str = np.array(Image.open(lena_path)) # kernel to be applied __A : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __A : Optional[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __A : Optional[Any] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
260
0