Each row of the dataset has five fields:

| Column | Type | Values |
| --- | --- | --- |
| `code` | string | lengths 82 to 54.1k |
| `code_codestyle` | int64 | 0 to 699 |
| `style_context` | string | lengths 111 to 35.6k |
| `style_context_codestyle` | int64 | 0 to 699 |
| `label` | int64 | 0 to 1 |
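The sample rows reproduced below follow this schema: a `code` snippet, its style id, a `style_context` snippet, its style id, and a binary `label`. As a quick orientation, here is a minimal sketch of how rows with this layout could be loaded and inspected with the `datasets` library; the repository id is a placeholder (the dataset's real id is not stated on this page), and reading `label` as a style-match indicator is an assumption, not documented behavior.

```python
# Minimal sketch, assuming the rows below come from a Hugging Face dataset
# with the schema above. "org/code-style-pairs" is a hypothetical repo id.
from datasets import load_dataset

ds = load_dataset("org/code-style-pairs", split="train")  # placeholder id

row = ds[0]
print(len(row["code"]))                # string length (82 to 54.1k per the schema)
print(row["code_codestyle"])           # integer style id in [0, 699]
print(row["style_context_codestyle"])  # style id of the context snippet
print(row["label"])                    # 0 or 1 (assumed: whether the two styles match)
```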
"""simple docstring""" import os from collections.abc import Iterator def _snake_case ( snake_case__ : str = "." ): for dir_path, dir_names, filenames in os.walk(snake_case__ ): A = [d for d in dir_names if d != 'scripts' and d[0] not in '._'] for filename in filenames: if filename == "__init__.py": continue if os.path.splitext(snake_case__ )[1] in (".py", ".ipynb"): yield os.path.join(snake_case__ , snake_case__ ).lstrip('./' ) def _snake_case ( snake_case__ : str ): return F'{i * " "}*' if i else "\n##" def _snake_case ( snake_case__ : str , snake_case__ : str ): A = old_path.split(os.sep ) for i, new_part in enumerate(new_path.split(os.sep ) ): if (i + 1 > len(snake_case__ ) or old_parts[i] != new_part) and new_part: print(F'{md_prefix(snake_case__ )} {new_part.replace("_" , " " ).title()}' ) return new_path def _snake_case ( snake_case__ : str = "." ): A = '' for filepath in sorted(good_file_paths(snake_case__ ) ): A , A = os.path.split(snake_case__ ) if filepath != old_path: A = print_path(snake_case__ , snake_case__ ) A = (filepath.count(os.sep ) + 1) if filepath else 0 A = F'{filepath}/{filename}'.replace(' ' , '%20' ) A = os.path.splitext(filename.replace('_' , ' ' ).title() )[0] print(F'{md_prefix(snake_case__ )} [{filename}]({url})' ) if __name__ == "__main__": print_directory_md('''.''')
`code_codestyle`: 91
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) 
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of 
onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
`style_context_codestyle`: 91
`label`: 1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Any = '''glpn''' def __init__( self : Tuple ,A_ : Optional[int]=3 ,A_ : Union[str, Any]=4 ,A_ : Optional[int]=[2, 2, 2, 2] ,A_ : List[str]=[8, 4, 2, 1] ,A_ : List[str]=[32, 64, 160, 256] ,A_ : List[str]=[7, 3, 3, 3] ,A_ : List[Any]=[4, 2, 2, 2] ,A_ : Dict=[1, 2, 5, 8] ,A_ : List[str]=[4, 4, 4, 4] ,A_ : Union[str, Any]="gelu" ,A_ : Optional[Any]=0.0 ,A_ : Dict=0.0 ,A_ : Any=0.02 ,A_ : List[Any]=0.1 ,A_ : Optional[int]=1e-6 ,A_ : int=64 ,A_ : Optional[int]=10 ,A_ : List[str]=-1 ,**A_ : str ,) -> Optional[int]: super().__init__(**A_ ) A = num_channels A = num_encoder_blocks A = depths A = sr_ratios A = hidden_sizes A = patch_sizes A = strides A = mlp_ratios A = num_attention_heads A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = drop_path_rate A = layer_norm_eps A = decoder_hidden_size A = max_depth A = head_in_index
`code_codestyle`: 91
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
`style_context_codestyle`: 91
`label`: 1
"""simple docstring""" import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : int ,A_ : int ,A_ : int=3 ,A_ : Union[str, Any]=7 ,A_ : int=True ,A_ : Tuple=True ,A_ : Dict=False ,A_ : List[str]=True ,A_ : int=99 ,A_ : List[str]=32 ,A_ : int=5 ,A_ : Tuple=4 ,A_ : Dict=37 ,A_ : List[str]="gelu" ,A_ : str=0.1 ,A_ : Dict=0.1 ,A_ : List[Any]=512 ,A_ : Tuple=16 ,A_ : Union[str, Any]=2 ,A_ : List[str]=0.02 ,A_ : str=3 ,A_ : Optional[int]=4 ,A_ : Tuple=None ,) -> Union[str, Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: return FalconConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,pad_token_id=1 ,new_decoder_architecture=A_ ,) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ,A_ : Union[str, Any] ,A_ : int ,A_ : Any ,A_ : List[str] ,A_ : List[Any] ,A_ : Any ) -> List[Any]: A = FalconModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,) -> Optional[Any]: A = True A = FalconModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) 
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Dict ,A_ : int ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : int ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : List[Any] ,) -> str: A = FalconForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Union[str, Any] ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Optional[Any] ,A_ : Tuple ,A_ : List[Any] ,A_ : Union[str, Any] ,A_ : Tuple ,) -> List[str]: A = True A = True A = FalconForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) _lowerCamelCase: int = (FalconForCausalLM,) if is_torch_available() else () _lowerCamelCase: List[Any] = ( { '''feature-extraction''': FalconModel, '''text-classification''': FalconForSequenceClassification, '''text-generation''': FalconForCausalLM, '''question-answering''': FalconForQuestionAnswering, '''token-classification''': FalconForTokenClassification, '''zero-shot''': FalconForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: str = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = FalconModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: A , *A = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: A = alibi 
self.model_tester.create_and_check_model(A_ ,*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = input_dict['input_ids'] A = FalconForCausalLM(A_ ) model.to(A_ ) model.eval() A = model(A_ ,use_cache=A_ ) A = input_ids.shape[0] A = model._convert_to_rw_cache(result.past_key_values ) A = model._convert_cache_to_standard_format(A_ ,A_ ) for layer in range(len(A_ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: A , A = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(A_ ,'use_cache' ): return A = model_class(A_ ).to(A_ ) if "use_cache" not in inputs: A = True A = model(**A_ ) # If "past_key_values" is not returned, pass the test (e.g. 
RWKV uses a different cache name and format) if "past_key_values" not in outputs: return A = ( getattr(A_ ,'decoder_layers' ,A_ ) or getattr(A_ ,'num_decoder_layers' ,A_ ) or config.num_hidden_layers ) A = getattr(A_ ,'num_kv_heads' ,config.num_attention_heads ) A = getattr(A_ ,'d_model' ,config.hidden_size ) A = embed_dim // num_attention_heads A = outputs['past_key_values'] self.assertEqual(len(A_ ) ,A_ ) A , A = inputs['input_ids'].shape for i in range(A_ ): if config.new_decoder_architecture: A = config.num_attention_heads elif config.multi_query: A = 1 self.assertEqual(len(past_kv[0] ) ,2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape ,(batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: A = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' ) A = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' ) model.eval() model.to(A_ ) A = tokenizer('My favorite food is' ,return_tensors='pt' ).to(A_ ) A = ( 'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.' ) A = model.generate(**A_ ,do_sample=A_ ,max_new_tokens=19 ) A = tokenizer.batch_decode(A_ )[0] self.assertEqual(A_ ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: A = AutoTokenizer.from_pretrained(A_ ) A = FalconForCausalLM.from_pretrained(A_ ) model.eval() model.to(A_ ) A = tokenizer('My favorite food is' ,return_tensors='pt' ).to(A_ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**A_ ,do_sample=A_ ,max_new_tokens=4 ) model.generate(**A_ ,do_sample=A_ ,max_new_tokens=4 ) model.generate(**A_ ,num_beams=2 ,max_new_tokens=4 ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: A = AutoTokenizer.from_pretrained(A_ ) A = FalconForCausalLM.from_pretrained(A_ ) model.eval() model.to(device=A_ ) A = tokenizer('My favorite food is' ,return_tensors='pt' ).to(A_ ) # Test results are the same with and without cache A = model.generate(**A_ ,do_sample=A_ ,max_new_tokens=20 ,use_cache=A_ ) A = model.generate(**A_ ,do_sample=A_ ,max_new_tokens=20 ,use_cache=A_ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
`code_codestyle`: 91
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
`style_context_codestyle`: 91
`label`: 1
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def _snake_case ( snake_case__ : Dict ): A = 384 if "tiny" in model_name: A = [3, 3, 9, 3] A = [96, 192, 384, 768] if "small" in model_name: A = [3, 3, 27, 3] A = [96, 192, 384, 768] if "base" in model_name: A = [3, 3, 27, 3] A = [128, 256, 512, 1024] A = 512 if "large" in model_name: A = [3, 3, 27, 3] A = [192, 384, 768, 1536] A = 768 if "xlarge" in model_name: A = [3, 3, 27, 3] A = [256, 512, 1024, 2048] A = 1024 # set label information A = 150 A = 'huggingface/label-files' A = 'ade20k-id2label.json' A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) A = {int(snake_case__ ): v for k, v in idalabel.items()} A = {v: k for k, v in idalabel.items()} A = ConvNextConfig( depths=snake_case__ , hidden_sizes=snake_case__ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] ) A = UperNetConfig( backbone_config=snake_case__ , auxiliary_in_channels=snake_case__ , num_labels=snake_case__ , idalabel=snake_case__ , labelaid=snake_case__ , ) return config def _snake_case ( snake_case__ : int ): A = [] # fmt: off # stem rename_keys.append(('backbone.downsample_layers.0.0.weight', 'backbone.embeddings.patch_embeddings.weight') ) rename_keys.append(('backbone.downsample_layers.0.0.bias', 'backbone.embeddings.patch_embeddings.bias') ) rename_keys.append(('backbone.downsample_layers.0.1.weight', 'backbone.embeddings.layernorm.weight') ) rename_keys.append(('backbone.downsample_layers.0.1.bias', 'backbone.embeddings.layernorm.bias') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') ) rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') ) rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') ) rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') ) rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') ) rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') ) rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') ) rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') ) rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') ) if i > 0: rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') ) rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') ) rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') ) rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') ) rename_keys.append((F'backbone.norm{i}.weight', 
F'backbone.hidden_states_norms.stage{i+1}.weight') ) rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') ) # decode head rename_keys.extend( [ ('decode_head.conv_seg.weight', 'decode_head.classifier.weight'), ('decode_head.conv_seg.bias', 'decode_head.classifier.bias'), ('auxiliary_head.conv_seg.weight', 'auxiliary_head.classifier.weight'), ('auxiliary_head.conv_seg.bias', 'auxiliary_head.classifier.bias'), ] ) # fmt: on return rename_keys def _snake_case ( snake_case__ : int , snake_case__ : Any , snake_case__ : Union[str, Any] ): A = dct.pop(snake_case__ ) A = val def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[Any] ): A = { 'upernet-convnext-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth', 'upernet-convnext-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth', 'upernet-convnext-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth', 'upernet-convnext-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth', 'upernet-convnext-xlarge': 'https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth', } A = model_name_to_url[model_name] A = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' )['state_dict'] A = get_upernet_config(snake_case__ ) A = UperNetForSemanticSegmentation(snake_case__ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): A = state_dict.pop(snake_case__ ) if "bn" in key: A = key.replace('bn' , 'batch_norm' ) A = val # rename keys A = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) model.load_state_dict(snake_case__ ) # verify on image A = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' ) A = SegformerImageProcessor() A = processor(snake_case__ , return_tensors='pt' ).pixel_values with torch.no_grad(): A = model(snake_case__ ) if model_name == "upernet-convnext-tiny": A = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ) elif model_name == "upernet-convnext-small": A = torch.tensor( [[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] ) elif model_name == "upernet-convnext-base": A = torch.tensor( [[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] ) elif model_name == "upernet-convnext-large": A = torch.tensor( [[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] ) elif model_name == "upernet-convnext-xlarge": A = torch.tensor( [[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] ) print('Logits:' , outputs.logits[0, 0, :3, :3] ) assert 
torch.allclose(outputs.logits[0, 0, :3, :3] , snake_case__ , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving processor to {pytorch_dump_folder_path}' ) processor.save_pretrained(snake_case__ ) if push_to_hub: print(F'Pushing model and processor for {model_name} to hub' ) model.push_to_hub(F'openmmlab/{model_name}' ) processor.push_to_hub(F'openmmlab/{model_name}' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-convnext-tiny''', type=str, choices=[F"""upernet-convnext-{size}""" for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']], help='''Name of the ConvNext UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) _lowercase = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
`code_codestyle`: 91
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
`style_context_codestyle`: 91
`label`: 1
"""simple docstring""" _lowercase = '''Tobias Carryer''' from time import time class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : str ,A_ : Optional[Any] ,A_ : List[str] ,A_ : List[Any]=int(time() ) ) -> Union[str, Any]: # noqa: B008 A = multiplier A = increment A = modulo A = seed def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. _lowercase = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31) while True: print(lcg.next_number())
`code_codestyle`: 91
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
`style_context_codestyle`: 91
`label`: 1
"""simple docstring""" import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated _lowercase = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test''']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ _lowercase = '''https://storage.googleapis.com/cvdf-datasets/mnist/''' def _snake_case ( snake_case__ : Optional[int] ): A = numpy.dtype(numpy.uintaa ).newbyteorder('>' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=snake_case__ )[0] @deprecated(snake_case__ , 'Please use tf.data to implement this functionality.' ) def _snake_case ( snake_case__ : Dict ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=snake_case__ ) as bytestream: A = _readaa(snake_case__ ) if magic != 2051: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) ) A = _readaa(snake_case__ ) A = _readaa(snake_case__ ) A = _readaa(snake_case__ ) A = bytestream.read(rows * cols * num_images ) A = numpy.frombuffer(snake_case__ , dtype=numpy.uinta ) A = data.reshape(snake_case__ , snake_case__ , snake_case__ , 1 ) return data @deprecated(snake_case__ , 'Please use tf.one_hot on tensors.' ) def _snake_case ( snake_case__ : Tuple , snake_case__ : str ): A = labels_dense.shape[0] A = numpy.arange(snake_case__ ) * num_classes A = numpy.zeros((num_labels, num_classes) ) A = 1 return labels_one_hot @deprecated(snake_case__ , 'Please use tf.data to implement this functionality.' ) def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Optional[int]=False , snake_case__ : List[str]=10 ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=snake_case__ ) as bytestream: A = _readaa(snake_case__ ) if magic != 2049: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) ) A = _readaa(snake_case__ ) A = bytestream.read(snake_case__ ) A = numpy.frombuffer(snake_case__ , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(snake_case__ , snake_case__ ) return labels class lowerCAmelCase_ : '''simple docstring''' @deprecated( A_ ,'Please use alternatives such as official/mnist/_DataSet.py' ' from tensorflow/models.' ,) def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Any ,A_ : Dict=False ,A_ : Optional[int]=False ,A_ : List[str]=dtypes.floataa ,A_ : Optional[Any]=True ,A_ : Union[str, Any]=None ,) -> Optional[Any]: A , A = random_seed.get_seed(A_ ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) A = dtypes.as_dtype(A_ ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype ) if fake_data: A = 1_0000 A = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'images.shape: {images.shape} labels.shape: {labels.shape}' A = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 A = images.reshape( images.shape[0] ,images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
A = images.astype(numpy.floataa ) A = numpy.multiply(A_ ,1.0 / 2_55.0 ) A = images A = labels A = 0 A = 0 @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self._images @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: return self._labels @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: return self._num_examples @property def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return self._epochs_completed def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Dict ,A_ : Tuple=False ,A_ : List[Any]=True ) -> Union[str, Any]: if fake_data: A = [1] * 784 A = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(A_ )], [fake_label for _ in range(A_ )], ) A = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: A = numpy.arange(self._num_examples ) numpy.random.shuffle(A_ ) A = self.images[perma] A = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch A = self._num_examples - start A = self._images[start : self._num_examples] A = self._labels[start : self._num_examples] # Shuffle the data if shuffle: A = numpy.arange(self._num_examples ) numpy.random.shuffle(A_ ) A = self.images[perm] A = self.labels[perm] # Start next epoch A = 0 A = batch_size - rest_num_examples A = self._index_in_epoch A = self._images[start:end] A = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) ,axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) ,axis=0 ), ) else: self._index_in_epoch += batch_size A = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(snake_case__ , 'Please write your own downloading logic.' ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : List[Any] ): if not gfile.Exists(snake_case__ ): gfile.MakeDirs(snake_case__ ) A = os.path.join(snake_case__ , snake_case__ ) if not gfile.Exists(snake_case__ ): urllib.request.urlretrieve(snake_case__ , snake_case__ ) # noqa: S310 with gfile.GFile(snake_case__ ) as f: A = f.size() print('Successfully downloaded' , snake_case__ , snake_case__ , 'bytes.' 
) return filepath @deprecated( snake_case__ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' ) def _snake_case ( snake_case__ : List[Any] , snake_case__ : Dict=False , snake_case__ : List[Any]=False , snake_case__ : List[str]=dtypes.floataa , snake_case__ : List[Any]=True , snake_case__ : Any=5000 , snake_case__ : Tuple=None , snake_case__ : Optional[int]=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=snake_case__ , one_hot=snake_case__ , dtype=snake_case__ , seed=snake_case__ ) A = fake() A = fake() A = fake() return _Datasets(train=snake_case__ , validation=snake_case__ , test=snake_case__ ) if not source_url: # empty string check A = DEFAULT_SOURCE_URL A = 'train-images-idx3-ubyte.gz' A = 'train-labels-idx1-ubyte.gz' A = 't10k-images-idx3-ubyte.gz' A = 't10k-labels-idx1-ubyte.gz' A = _maybe_download( snake_case__ , snake_case__ , source_url + train_images_file ) with gfile.Open(snake_case__ , 'rb' ) as f: A = _extract_images(snake_case__ ) A = _maybe_download( snake_case__ , snake_case__ , source_url + train_labels_file ) with gfile.Open(snake_case__ , 'rb' ) as f: A = _extract_labels(snake_case__ , one_hot=snake_case__ ) A = _maybe_download( snake_case__ , snake_case__ , source_url + test_images_file ) with gfile.Open(snake_case__ , 'rb' ) as f: A = _extract_images(snake_case__ ) A = _maybe_download( snake_case__ , snake_case__ , source_url + test_labels_file ) with gfile.Open(snake_case__ , 'rb' ) as f: A = _extract_labels(snake_case__ , one_hot=snake_case__ ) if not 0 <= validation_size <= len(snake_case__ ): A = ( 'Validation size should be between 0 and ' F'{len(snake_case__ )}. Received: {validation_size}.' ) raise ValueError(snake_case__ ) A = train_images[:validation_size] A = train_labels[:validation_size] A = train_images[validation_size:] A = train_labels[validation_size:] A = {'dtype': dtype, 'reshape': reshape, 'seed': seed} A = _DataSet(snake_case__ , snake_case__ , **snake_case__ ) A = _DataSet(snake_case__ , snake_case__ , **snake_case__ ) A = _DataSet(snake_case__ , snake_case__ , **snake_case__ ) return _Datasets(train=snake_case__ , validation=snake_case__ , test=snake_case__ )
`code_codestyle`: 91
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
`style_context_codestyle`: 91
`label`: 1
"""simple docstring""" import numpy as np from nltk.translate import meteor_score import datasets from datasets.config import importlib_metadata, version _lowercase = version.parse(importlib_metadata.version('''nltk''')) if NLTK_VERSION >= version.Version('''3.6.4'''): from nltk import word_tokenize _lowercase = '''\ @inproceedings{banarjee2005, title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments}, author = {Banerjee, Satanjeev and Lavie, Alon}, booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization}, month = jun, year = {2005}, address = {Ann Arbor, Michigan}, publisher = {Association for Computational Linguistics}, url = {https://www.aclweb.org/anthology/W05-0909}, pages = {65--72}, } ''' _lowercase = '''\ METEOR, an automatic metric for machine translation evaluation that is based on a generalized concept of unigram matching between the machine-produced translation and human-produced reference translations. Unigrams can be matched based on their surface forms, stemmed forms, and meanings; furthermore, METEOR can be easily extended to include more advanced matching strategies. Once all generalized unigram matches between the two strings have been found, METEOR computes a score for this matching using a combination of unigram-precision, unigram-recall, and a measure of fragmentation that is designed to directly capture how well-ordered the matched words in the machine translation are in relation to the reference. METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic data and 0.331 on the Chinese data. This is shown to be an improvement on using simply unigram-precision, unigram-recall and their harmonic F1 combination. ''' _lowercase = ''' Computes METEOR score of translated segments against one or more references. Args: predictions: list of predictions to score. Each prediction should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. alpha: Parameter for controlling relative weights of precision and recall. default: 0.9 beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3 gamma: Relative weight assigned to fragmentation penalty. default: 0.5 Returns: \'meteor\': meteor score. 
Examples: >>> meteor = datasets.load_metric(\'meteor\') >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"] >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"] >>> results = meteor.compute(predictions=predictions, references=references) >>> print(round(results["meteor"], 4)) 0.6944 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('string' ,id='sequence' ), 'references': datasets.Value('string' ,id='sequence' ), } ) ,codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] ,reference_urls=[ 'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score', 'https://en.wikipedia.org/wiki/METEOR', ] ,) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: import nltk nltk.download('wordnet' ) if NLTK_VERSION >= version.Version('3.6.5' ): nltk.download('punkt' ) if NLTK_VERSION >= version.Version('3.6.6' ): nltk.download('omw-1.4' ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any]=0.9 ,A_ : List[str]=3 ,A_ : Dict=0.5 ) -> Optional[Any]: if NLTK_VERSION >= version.Version('3.6.5' ): A = [ meteor_score.single_meteor_score( word_tokenize(A_ ) ,word_tokenize(A_ ) ,alpha=A_ ,beta=A_ ,gamma=A_ ) for ref, pred in zip(A_ ,A_ ) ] else: A = [ meteor_score.single_meteor_score(A_ ,A_ ,alpha=A_ ,beta=A_ ,gamma=A_ ) for ref, pred in zip(A_ ,A_ ) ] return {"meteor": np.mean(A_ )}
91
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
91
1
"""simple docstring""" import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process _lowercase = logging.getLogger(__name__) _lowercase = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) _lowercase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Leave None if you want to train a model from''' ''' scratch.''' ) } , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_lowercase )} , ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={'''help''': '''The input training data file (a text file).'''} ) _lowerCamelCase: Optional[str] = field( default=_lowercase , metadata={ '''help''': ( '''The input training data files (multiple files in glob format). 
'''
                '''Splitting large files into smaller ones can often prevent the tokenizer from running out of memory.'''
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''},
    )
    line_by_line: bool = field(
        default=False,
        metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''},
    )
    mlm: bool = field(
        default=False, metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''}
    )
    whole_word_mask: bool = field(default=False, metadata={'''help''': '''Whether or not to use whole word mask.'''})
    mlm_probability: float = field(
        default=0.15, metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            '''help''': (
                '''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
                ''' modeling.'''
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''}
    )
    block_size: int = field(
        default=-1,
        metadata={
            '''help''': (
                '''Optional input sequence length after tokenization. '''
                '''The training dataset will be truncated in blocks of this size for training. '''
                '''Defaults to the model max input length for single sentence inputs (taking into account special tokens).'''
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set whole word masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) A , A , A = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( 'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ' 'or remove the --do_eval argument.' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , snake_case__ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: A = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: A = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.tokenizer_name: A = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: A = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another' ' script, save it,and load it from here, using --tokenizer_name' ) if model_args.model_name_or_path: A = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , ) else: logger.info('Training new model from scratch' ) A = AutoModelWithLMHead.from_config(snake_case__ ) model.resize_token_embeddings(len(snake_case__ ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( 'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the' '--mlm flag (masked language modeling).' 
) if data_args.block_size <= 0: A = tokenizer.max_len # Our input block size will be the max possible for the model else: A = min(data_args.block_size , tokenizer.max_len ) # Get datasets A = ( get_dataset(snake_case__ , tokenizer=snake_case__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) A = ( get_dataset(snake_case__ , tokenizer=snake_case__ , evaluate=snake_case__ , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": A = DataCollatorForPermutationLanguageModeling( tokenizer=snake_case__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: A = DataCollatorForWholeWordMask( tokenizer=snake_case__ , mlm_probability=data_args.mlm_probability ) else: A = DataCollatorForLanguageModeling( tokenizer=snake_case__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer A = Trainer( model=snake_case__ , args=snake_case__ , data_collator=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , prediction_loss_only=snake_case__ , ) # Training if training_args.do_train: A = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=snake_case__ ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation A = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) A = trainer.evaluate() A = math.exp(eval_output['eval_loss'] ) A = {'perplexity': perplexity} A = os.path.join(training_args.output_dir , 'eval_results_lm.txt' ) if trainer.is_world_master(): with open(snake_case__ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , snake_case__ , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) results.update(snake_case__ ) return results def _snake_case ( snake_case__ : List[str] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
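# --- Illustrative sketch, not part of the original script ---
# What the MLM collator selected above produces: `labels` equal the input ids
# at the randomly masked positions and -100 everywhere else (positions the
# loss ignores). The checkpoint name is illustrative.
from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
batch = collator([tokenizer("hello world"), tokenizer("a second, longer example sentence")])
print(batch["input_ids"].shape, batch["labels"].shape)  # both padded to a common length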
91
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
91
1
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position _lowercase = '''2.13.1''' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('''3.7'''): raise ImportWarning( '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.''' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n''' '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.''' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip _lowercase = concatenate_datasets _lowercase = DownloadConfig _lowercase = DownloadManager _lowercase = DownloadMode _lowercase = DownloadConfig _lowercase = DownloadMode _lowercase = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
91
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
91
1
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
91
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
91
1
"""simple docstring""" _lowercase = [0, 2, 4, 6, 8] _lowercase = [1, 3, 5, 7, 9] def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : list[int] , snake_case__ : int ): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 A = 0 for digit in range(10 ): A = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , snake_case__ , snake_case__ ) return result A = 0 for digita in range(10 ): A = digita if (remainder + digita) % 2 == 0: A = ODD_DIGITS else: A = EVEN_DIGITS for digita in other_parity_digits: A = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , snake_case__ , snake_case__ , ) return result def _snake_case ( snake_case__ : int = 9 ): A = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(snake_case__ , 0 , [0] * length , snake_case__ ) return result if __name__ == "__main__": print(F"""{solution() = }""")
91
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, 
*report_metric_keys] A = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
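# --- Illustrative sketch, not part of the original script ---
# The cartesian product that turns the --variations dimensions into concrete
# command-line suffixes, exactly as described in the header comment.
import itertools

dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
variations = [" ".join(combo).strip() for combo in itertools.product(*dims)]
print(variations)
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']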
91
1
"""simple docstring""" from statistics import mean, stdev def _snake_case ( snake_case__ : list , snake_case__ : int = 3 ): A = min(snake_case__ ) A = max(snake_case__ ) # normalize data return [round((x - x_min) / (x_max - x_min) , snake_case__ ) for x in data] def _snake_case ( snake_case__ : list , snake_case__ : int = 3 ): A = mean(snake_case__ ) A = stdev(snake_case__ ) # standardize data return [round((x - mu) / (sigma) , snake_case__ ) for x in data]
91
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from 
{input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
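# --- Illustrative sketch, not part of the original file ---
# The FULL_STATE_DICT branch above, in isolation: gather a complete state
# dict with CPU offload, materialized on rank 0 only. `model` is assumed to
# be an FSDP-wrapped module inside an initialized process group.
from torch.distributed.fsdp.fully_sharded_data_parallel import (
    FullStateDictConfig,
    FullyShardedDataParallel as FSDP,
    StateDictType,
)

config = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, config):
    full_state = model.state_dict()  # complete on rank 0, empty elsewhere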
91
1
"""simple docstring""" import math import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from .attention_processor import Attention from .embeddings import get_timestep_embedding from .modeling_utils import ModelMixin class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' @register_to_config def __init__( self : List[str] ,A_ : int = 128 ,A_ : int = 256 ,A_ : float = 20_00.0 ,A_ : int = 768 ,A_ : int = 12 ,A_ : int = 12 ,A_ : int = 64 ,A_ : int = 2048 ,A_ : float = 0.1 ,) -> str: super().__init__() A = nn.Sequential( nn.Linear(A_ ,d_model * 4 ,bias=A_ ) ,nn.SiLU() ,nn.Linear(d_model * 4 ,d_model * 4 ,bias=A_ ) ,nn.SiLU() ,) A = nn.Embedding(A_ ,A_ ) A = False A = nn.Linear(A_ ,A_ ,bias=A_ ) A = nn.Dropout(p=A_ ) A = nn.ModuleList() for lyr_num in range(A_ ): # FiLM conditional T5 decoder A = DecoderLayer(d_model=A_ ,d_kv=A_ ,num_heads=A_ ,d_ff=A_ ,dropout_rate=A_ ) self.decoders.append(A_ ) A = TaLayerNorm(A_ ) A = nn.Dropout(p=A_ ) A = nn.Linear(A_ ,A_ ,bias=A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[Any] ,A_ : int ) -> Any: A = torch.mul(query_input.unsqueeze(-1 ) ,key_input.unsqueeze(-2 ) ) return mask.unsqueeze(-3 ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Dict ,A_ : List[Any] ,A_ : Optional[Any] ) -> List[Any]: A , A , A = decoder_input_tokens.shape assert decoder_noise_time.shape == (batch,) # decoder_noise_time is in [0, 1), so rescale to expected timing range. A = get_timestep_embedding( decoder_noise_time * self.config.max_decoder_noise_time ,embedding_dim=self.config.d_model ,max_period=self.config.max_decoder_noise_time ,).to(dtype=self.dtype ) A = self.conditioning_emb(A_ ).unsqueeze(1 ) assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4) A = decoder_input_tokens.shape[1] # If we want to use relative positions for audio context, we can just offset # this sequence by the length of encodings_and_masks. A = torch.broadcast_to( torch.arange(A_ ,device=decoder_input_tokens.device ) ,(batch, seq_length) ,) A = self.position_encoding(A_ ) A = self.continuous_inputs_projection(A_ ) inputs += position_encodings A = self.dropout(A_ ) # decoder: No padding present. A = torch.ones( decoder_input_tokens.shape[:2] ,device=decoder_input_tokens.device ,dtype=inputs.dtype ) # Translate encoding masks to encoder-decoder masks. 
A = [(x, self.encoder_decoder_mask(A_ ,A_ )) for x, y in encodings_and_masks] # cross attend style: concat encodings A = torch.cat([x[0] for x in encodings_and_encdec_masks] ,dim=1 ) A = torch.cat([x[1] for x in encodings_and_encdec_masks] ,dim=-1 ) for lyr in self.decoders: A = lyr( A_ ,conditioning_emb=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,)[0] A = self.decoder_norm(A_ ) A = self.post_dropout(A_ ) A = self.spec_out(A_ ) return spec_out class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] ,A_ : Dict ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : int ,A_ : Tuple ,A_ : int=1e-6 ) -> Any: super().__init__() A = nn.ModuleList() # cond self attention: layer 0 self.layer.append( TaLayerSelfAttentionCond(d_model=A_ ,d_kv=A_ ,num_heads=A_ ,dropout_rate=A_ ) ) # cross attention: layer 1 self.layer.append( TaLayerCrossAttention( d_model=A_ ,d_kv=A_ ,num_heads=A_ ,dropout_rate=A_ ,layer_norm_epsilon=A_ ,) ) # Film Cond MLP + dropout: last layer self.layer.append( TaLayerFFCond(d_model=A_ ,d_ff=A_ ,dropout_rate=A_ ,layer_norm_epsilon=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : str=None ,A_ : Any=None ,A_ : Optional[Any]=None ,A_ : List[str]=None ,A_ : Optional[int]=None ,) -> Optional[int]: A = self.layer[0]( A_ ,conditioning_emb=A_ ,attention_mask=A_ ,) if encoder_hidden_states is not None: A = torch.where(encoder_attention_mask > 0 ,0 ,-1e10 ).to( encoder_hidden_states.dtype ) A = self.layer[1]( A_ ,key_value_states=A_ ,attention_mask=A_ ,) # Apply Film Conditional Feed Forward layer A = self.layer[-1](A_ ,A_ ) return (hidden_states,) class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : int ,A_ : Optional[int] ,A_ : str ,A_ : int ,A_ : int ) -> Optional[int]: super().__init__() A = TaLayerNorm(A_ ) A = TaFiLMLayer(in_features=d_model * 4 ,out_features=A_ ) A = Attention(query_dim=A_ ,heads=A_ ,dim_head=A_ ,out_bias=A_ ,scale_qk=A_ ) A = nn.Dropout(A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : int ,A_ : str=None ,A_ : Optional[Any]=None ,) -> List[Any]: # pre_self_attention_layer_norm A = self.layer_norm(A_ ) if conditioning_emb is not None: A = self.FiLMLayer(A_ ,A_ ) # Self-attention block A = self.attention(A_ ) A = hidden_states + self.dropout(A_ ) return hidden_states class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Any ,A_ : Any ,A_ : Any ,A_ : Optional[int] ,A_ : int ,A_ : str ) -> List[str]: super().__init__() A = Attention(query_dim=A_ ,heads=A_ ,dim_head=A_ ,out_bias=A_ ,scale_qk=A_ ) A = TaLayerNorm(A_ ,eps=A_ ) A = nn.Dropout(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Optional[Any]=None ,A_ : Tuple=None ,) -> List[str]: A = self.layer_norm(A_ ) A = self.attention( A_ ,encoder_hidden_states=A_ ,attention_mask=attention_mask.squeeze(1 ) ,) A = hidden_states + self.dropout(A_ ) return layer_output class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] ,A_ : str ,A_ : str ,A_ : List[Any] ,A_ : Tuple ) -> int: super().__init__() A = TaDenseGatedActDense(d_model=A_ ,d_ff=A_ ,dropout_rate=A_ ) A = TaFiLMLayer(in_features=d_model * 4 ,out_features=A_ ) A = TaLayerNorm(A_ ,eps=A_ ) A = nn.Dropout(A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Optional[int] ,A_ : Optional[int]=None ) -> List[str]: A = self.layer_norm(A_ ) if conditioning_emb is not None: A = self.film(A_ ,A_ ) A = self.DenseReluDense(A_ ) A = hidden_states + self.dropout(A_ ) return hidden_states 
class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] ,A_ : Tuple ,A_ : int ,A_ : str ) -> Optional[int]: super().__init__() A = nn.Linear(A_ ,A_ ,bias=A_ ) A = nn.Linear(A_ ,A_ ,bias=A_ ) A = nn.Linear(A_ ,A_ ,bias=A_ ) A = nn.Dropout(A_ ) A = NewGELUActivation() def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ) -> List[str]: A = self.act(self.wi_a(A_ ) ) A = self.wi_a(A_ ) A = hidden_gelu * hidden_linear A = self.dropout(A_ ) A = self.wo(A_ ) return hidden_states class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[int] ,A_ : Tuple=1e-6 ) -> Any: super().__init__() A = nn.Parameter(torch.ones(A_ ) ) A = eps def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str] ) -> str: # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for # half-precision inputs is done in fp32 A = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 ,keepdim=A_ ) A = hidden_states * torch.rsqrt(variance + self.variance_epsilon ) # convert into half-precision if necessary if self.weight.dtype in [torch.floataa, torch.bfloataa]: A = hidden_states.to(self.weight.dtype ) return self.weight * hidden_states class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : torch.Tensor ) -> torch.Tensor: return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(A_ ,3.0 )) )) class lowerCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : str ,A_ : List[Any] ,A_ : str ) -> Tuple: super().__init__() A = nn.Linear(A_ ,out_features * 2 ,bias=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Dict ,A_ : Tuple ) -> List[Any]: A = self.scale_bias(A_ ) A , A = torch.chunk(A_ ,2 ,-1 ) A = x * (1 + scale) + shift return x
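# --- Illustrative sketch, not part of the original file ---
# The FiLM transform implemented by the final class above: a linear layer
# predicts per-channel (scale, shift) from the conditioning embedding, then
# applies x * (1 + scale) + shift.
import torch

x = torch.ones(1, 4)
scale = torch.full((1, 4), 0.5)
shift = torch.full((1, 4), -1.0)
print(x * (1 + scale) + shift)  # tensor([[0.5000, 0.5000, 0.5000, 0.5000]])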
91
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, 
-0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 
,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 
0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
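For orientation, a minimal end-user sketch of the pipeline these tests exercise; the output file name is illustrative, and the 16 kHz rate matches the HiFi-GAN vocoder sampling rate configured above.

# Sketch: minimal text-to-audio generation with AudioLDMPipeline.
import scipy.io.wavfile
from diffusers import AudioLDMPipeline

pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
audio = pipe(
    "A hammer hitting a wooden surface",
    num_inference_steps=10,
    audio_length_in_s=5.0,
).audios[0]
scipy.io.wavfile.write("hammer.wav", rate=16000, data=audio)  # file name is illustrative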
91
1
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
1
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
91
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def _snake_case ( ): A = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A = get_sagemaker_input() else: A = get_cluster_input() return config def _snake_case ( snake_case__ : Any=None ): if subparsers is not None: A = subparsers.add_parser('config' , description=snake_case__ ) else: A = argparse.ArgumentParser('Accelerate config command' , description=snake_case__ ) parser.add_argument( '--config_file' , default=snake_case__ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def _snake_case ( snake_case__ : Tuple ): A = get_user_input() if args.config_file is not None: A = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) A = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F'accelerate configuration saved at {config_file}' ) def _snake_case ( ): A = config_command_parser() A = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
91
1
"""simple docstring""" from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def _snake_case ( snake_case__ : int ): A = int(number**0.5 ) return number == sq * sq def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int ): A = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den A = x_den * y_den * z_den A = gcd(snake_case__ , snake_case__ ) top //= hcf bottom //= hcf return top, bottom def _snake_case ( snake_case__ : int = 35 ): A = set() A = 42 A = Fraction(0 ) A = 42 for x_num in range(1 , order + 1 ): for x_den in range(x_num + 1 , order + 1 ): for y_num in range(1 , order + 1 ): for y_den in range(y_num + 1 , order + 1 ): # n=1 A = x_num * y_den + x_den * y_num A = x_den * y_den A = gcd(snake_case__ , snake_case__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: A = add_three( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) unique_s.add(snake_case__ ) # n=2 A = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) A = x_den * x_den * y_den * y_den if is_sq(snake_case__ ) and is_sq(snake_case__ ): A = int(sqrt(snake_case__ ) ) A = int(sqrt(snake_case__ ) ) A = gcd(snake_case__ , snake_case__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: A = add_three( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) unique_s.add(snake_case__ ) # n=-1 A = x_num * y_num A = x_den * y_num + x_num * y_den A = gcd(snake_case__ , snake_case__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: A = add_three( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) unique_s.add(snake_case__ ) # n=2 A = x_num * x_num * y_num * y_num A = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(snake_case__ ) and is_sq(snake_case__ ): A = int(sqrt(snake_case__ ) ) A = int(sqrt(snake_case__ ) ) A = gcd(snake_case__ , snake_case__ ) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: A = add_three( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) unique_s.add(snake_case__ ) for num, den in unique_s: total += Fraction(snake_case__ , snake_case__ ) return total.denominator + total.numerator if __name__ == "__main__": print(F"""{solution() = }""")
91
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]: A = LlamaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]: A = True A = LlamaModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) 
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]: A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]: A = True A = True A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else () _lowerCamelCase: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = LlamaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 
).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ids_tensor([1, 10] ,config.vocab_size ) A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() A = original_model(A_ ).last_hidden_state A = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = {'type': scaling_type, 'factor': 10.0} A = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() A = scaled_model(A_ ).last_hidden_state A = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' ) A = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) A = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' A = 'Simply put, the theory of relativity states that ' A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) A = tokenizer.encode(A_ ,return_tensors='pt' ) A = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ ) # greedy generation outputs A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ ) A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ )
91
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool _lowercase = { '''Acehnese Arabic''': '''ace_Arab''', '''Acehnese Latin''': '''ace_Latn''', '''Mesopotamian Arabic''': '''acm_Arab''', '''Ta\'izzi-Adeni Arabic''': '''acq_Arab''', '''Tunisian Arabic''': '''aeb_Arab''', '''Afrikaans''': '''afr_Latn''', '''South Levantine Arabic''': '''ajp_Arab''', '''Akan''': '''aka_Latn''', '''Amharic''': '''amh_Ethi''', '''North Levantine Arabic''': '''apc_Arab''', '''Modern Standard Arabic''': '''arb_Arab''', '''Modern Standard Arabic Romanized''': '''arb_Latn''', '''Najdi Arabic''': '''ars_Arab''', '''Moroccan Arabic''': '''ary_Arab''', '''Egyptian Arabic''': '''arz_Arab''', '''Assamese''': '''asm_Beng''', '''Asturian''': '''ast_Latn''', '''Awadhi''': '''awa_Deva''', '''Central Aymara''': '''ayr_Latn''', '''South Azerbaijani''': '''azb_Arab''', '''North Azerbaijani''': '''azj_Latn''', '''Bashkir''': '''bak_Cyrl''', '''Bambara''': '''bam_Latn''', '''Balinese''': '''ban_Latn''', '''Belarusian''': '''bel_Cyrl''', '''Bemba''': '''bem_Latn''', '''Bengali''': '''ben_Beng''', '''Bhojpuri''': '''bho_Deva''', '''Banjar Arabic''': '''bjn_Arab''', '''Banjar Latin''': '''bjn_Latn''', '''Standard Tibetan''': '''bod_Tibt''', '''Bosnian''': '''bos_Latn''', '''Buginese''': '''bug_Latn''', '''Bulgarian''': '''bul_Cyrl''', '''Catalan''': '''cat_Latn''', '''Cebuano''': '''ceb_Latn''', '''Czech''': '''ces_Latn''', '''Chokwe''': '''cjk_Latn''', '''Central Kurdish''': '''ckb_Arab''', '''Crimean Tatar''': '''crh_Latn''', '''Welsh''': '''cym_Latn''', '''Danish''': '''dan_Latn''', '''German''': '''deu_Latn''', '''Southwestern Dinka''': '''dik_Latn''', '''Dyula''': '''dyu_Latn''', '''Dzongkha''': '''dzo_Tibt''', '''Greek''': '''ell_Grek''', '''English''': '''eng_Latn''', '''Esperanto''': '''epo_Latn''', '''Estonian''': '''est_Latn''', '''Basque''': '''eus_Latn''', '''Ewe''': '''ewe_Latn''', '''Faroese''': '''fao_Latn''', '''Fijian''': '''fij_Latn''', '''Finnish''': '''fin_Latn''', '''Fon''': '''fon_Latn''', '''French''': '''fra_Latn''', '''Friulian''': '''fur_Latn''', '''Nigerian Fulfulde''': '''fuv_Latn''', '''Scottish Gaelic''': '''gla_Latn''', '''Irish''': '''gle_Latn''', '''Galician''': '''glg_Latn''', '''Guarani''': '''grn_Latn''', '''Gujarati''': '''guj_Gujr''', '''Haitian Creole''': '''hat_Latn''', '''Hausa''': '''hau_Latn''', '''Hebrew''': '''heb_Hebr''', '''Hindi''': '''hin_Deva''', '''Chhattisgarhi''': '''hne_Deva''', '''Croatian''': '''hrv_Latn''', '''Hungarian''': '''hun_Latn''', '''Armenian''': '''hye_Armn''', '''Igbo''': '''ibo_Latn''', '''Ilocano''': '''ilo_Latn''', '''Indonesian''': '''ind_Latn''', '''Icelandic''': '''isl_Latn''', '''Italian''': '''ita_Latn''', '''Javanese''': '''jav_Latn''', '''Japanese''': '''jpn_Jpan''', '''Kabyle''': '''kab_Latn''', '''Jingpho''': '''kac_Latn''', '''Kamba''': '''kam_Latn''', '''Kannada''': '''kan_Knda''', 
'''Kashmiri Arabic''': '''kas_Arab''', '''Kashmiri Devanagari''': '''kas_Deva''', '''Georgian''': '''kat_Geor''', '''Central Kanuri Arabic''': '''knc_Arab''', '''Central Kanuri Latin''': '''knc_Latn''', '''Kazakh''': '''kaz_Cyrl''', '''Kabiyè''': '''kbp_Latn''', '''Kabuverdianu''': '''kea_Latn''', '''Khmer''': '''khm_Khmr''', '''Kikuyu''': '''kik_Latn''', '''Kinyarwanda''': '''kin_Latn''', '''Kyrgyz''': '''kir_Cyrl''', '''Kimbundu''': '''kmb_Latn''', '''Northern Kurdish''': '''kmr_Latn''', '''Kikongo''': '''kon_Latn''', '''Korean''': '''kor_Hang''', '''Lao''': '''lao_Laoo''', '''Ligurian''': '''lij_Latn''', '''Limburgish''': '''lim_Latn''', '''Lingala''': '''lin_Latn''', '''Lithuanian''': '''lit_Latn''', '''Lombard''': '''lmo_Latn''', '''Latgalian''': '''ltg_Latn''', '''Luxembourgish''': '''ltz_Latn''', '''Luba-Kasai''': '''lua_Latn''', '''Ganda''': '''lug_Latn''', '''Luo''': '''luo_Latn''', '''Mizo''': '''lus_Latn''', '''Standard Latvian''': '''lvs_Latn''', '''Magahi''': '''mag_Deva''', '''Maithili''': '''mai_Deva''', '''Malayalam''': '''mal_Mlym''', '''Marathi''': '''mar_Deva''', '''Minangkabau Arabic ''': '''min_Arab''', '''Minangkabau Latin''': '''min_Latn''', '''Macedonian''': '''mkd_Cyrl''', '''Plateau Malagasy''': '''plt_Latn''', '''Maltese''': '''mlt_Latn''', '''Meitei Bengali''': '''mni_Beng''', '''Halh Mongolian''': '''khk_Cyrl''', '''Mossi''': '''mos_Latn''', '''Maori''': '''mri_Latn''', '''Burmese''': '''mya_Mymr''', '''Dutch''': '''nld_Latn''', '''Norwegian Nynorsk''': '''nno_Latn''', '''Norwegian Bokmål''': '''nob_Latn''', '''Nepali''': '''npi_Deva''', '''Northern Sotho''': '''nso_Latn''', '''Nuer''': '''nus_Latn''', '''Nyanja''': '''nya_Latn''', '''Occitan''': '''oci_Latn''', '''West Central Oromo''': '''gaz_Latn''', '''Odia''': '''ory_Orya''', '''Pangasinan''': '''pag_Latn''', '''Eastern Panjabi''': '''pan_Guru''', '''Papiamento''': '''pap_Latn''', '''Western Persian''': '''pes_Arab''', '''Polish''': '''pol_Latn''', '''Portuguese''': '''por_Latn''', '''Dari''': '''prs_Arab''', '''Southern Pashto''': '''pbt_Arab''', '''Ayacucho Quechua''': '''quy_Latn''', '''Romanian''': '''ron_Latn''', '''Rundi''': '''run_Latn''', '''Russian''': '''rus_Cyrl''', '''Sango''': '''sag_Latn''', '''Sanskrit''': '''san_Deva''', '''Santali''': '''sat_Olck''', '''Sicilian''': '''scn_Latn''', '''Shan''': '''shn_Mymr''', '''Sinhala''': '''sin_Sinh''', '''Slovak''': '''slk_Latn''', '''Slovenian''': '''slv_Latn''', '''Samoan''': '''smo_Latn''', '''Shona''': '''sna_Latn''', '''Sindhi''': '''snd_Arab''', '''Somali''': '''som_Latn''', '''Southern Sotho''': '''sot_Latn''', '''Spanish''': '''spa_Latn''', '''Tosk Albanian''': '''als_Latn''', '''Sardinian''': '''srd_Latn''', '''Serbian''': '''srp_Cyrl''', '''Swati''': '''ssw_Latn''', '''Sundanese''': '''sun_Latn''', '''Swedish''': '''swe_Latn''', '''Swahili''': '''swh_Latn''', '''Silesian''': '''szl_Latn''', '''Tamil''': '''tam_Taml''', '''Tatar''': '''tat_Cyrl''', '''Telugu''': '''tel_Telu''', '''Tajik''': '''tgk_Cyrl''', '''Tagalog''': '''tgl_Latn''', '''Thai''': '''tha_Thai''', '''Tigrinya''': '''tir_Ethi''', '''Tamasheq Latin''': '''taq_Latn''', '''Tamasheq Tifinagh''': '''taq_Tfng''', '''Tok Pisin''': '''tpi_Latn''', '''Tswana''': '''tsn_Latn''', '''Tsonga''': '''tso_Latn''', '''Turkmen''': '''tuk_Latn''', '''Tumbuka''': '''tum_Latn''', '''Turkish''': '''tur_Latn''', '''Twi''': '''twi_Latn''', '''Central Atlas Tamazight''': '''tzm_Tfng''', '''Uyghur''': '''uig_Arab''', '''Ukrainian''': '''ukr_Cyrl''', '''Umbundu''': '''umb_Latn''', '''Urdu''': 
'''urd_Arab''', '''Northern Uzbek''': '''uzn_Latn''', '''Venetian''': '''vec_Latn''', '''Vietnamese''': '''vie_Latn''', '''Waray''': '''war_Latn''', '''Wolof''': '''wol_Latn''', '''Xhosa''': '''xho_Latn''', '''Eastern Yiddish''': '''ydd_Hebr''', '''Yoruba''': '''yor_Latn''', '''Yue Chinese''': '''yue_Hant''', '''Chinese Simplified''': '''zho_Hans''', '''Chinese Traditional''': '''zho_Hant''', '''Standard Malay''': '''zsm_Latn''', '''Zulu''': '''zul_Latn''', }


class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM  # name as imported at the top of this module
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
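A hedged sketch of driving the tool through the documented load_tool entry point; the registered task name "translation" is an assumption about the tool registry.

# Sketch: translating with the tool above via the agents/tools API.
from transformers import load_tool

translator = load_tool("translation")  # task name is an assumption
print(translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English"))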
91
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
1
"""simple docstring""" def _snake_case ( snake_case__ : int = 100 ): A = n * (n + 1) * (2 * n + 1) / 6 A = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
91
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
1
"""simple docstring""" import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def _snake_case ( snake_case__ : List[Any] , snake_case__ : str , snake_case__ : str , snake_case__ : Path , snake_case__ : str = None , snake_case__ : str = None , snake_case__ : str = None , ): if config_name_or_path is None: A = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base' if generator_tokenizer_name_or_path is None: A = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: A = question_encoder_name_or_path A = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration # Save model. A = RagConfig.from_pretrained(snake_case__ ) A = AutoConfig.from_pretrained(snake_case__ ) A = AutoConfig.from_pretrained(snake_case__ ) A = gen_config A = question_encoder_config A = model_class.from_pretrained_question_encoder_generator( snake_case__ , snake_case__ , config=snake_case__ ) rag_model.save_pretrained(snake_case__ ) # Sanity check. model_class.from_pretrained(snake_case__ ) # Save tokenizers. A = AutoTokenizer.from_pretrained(snake_case__ ) gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' ) A = AutoTokenizer.from_pretrained(snake_case__ ) question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument( '''--model_type''', choices=['''rag_sequence''', '''rag_token'''], required=True, type=str, help='''RAG model type: rag_sequence, rag_token''', ) parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''') parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''') parser.add_argument( '''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier''' ) parser.add_argument( '''--generator_tokenizer_name_or_path''', type=str, help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''', ) parser.add_argument( '''--question_encoder_tokenizer_name_or_path''', type=str, help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''', ) parser.add_argument( '''--config_name_or_path''', type=str, help=( '''Identifier of the model config to use, if not provided, resolves to a base config for a given''' ''' ``model_type``''' ), ) _lowercase = parser.parse_args() _lowercase = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, 
FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" import os def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'triangle.txt' ) with open(snake_case__ ) as f: A = f.readlines() A = [] for line in triangle: A = [] for number in line.strip().split(' ' ): numbers_from_line.append(int(snake_case__ ) ) a.append(snake_case__ ) for i in range(1 , len(snake_case__ ) ): for j in range(len(a[i] ) ): A = a[i - 1][j] if j != len(a[i - 1] ) else 0 A = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(snake_case__ , snake_case__ ) return max(a[-1] ) if __name__ == "__main__": print(solution())
91
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': 
self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
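End users reach this builder through load_dataset rather than instantiating it directly; a minimal sketch in which the file name is illustrative and extra keyword arguments flow through CsvConfig into pandas.read_csv.

# Sketch: loading a local CSV through the packaged "csv" builder above.
from datasets import load_dataset

ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")
print(ds["train"].features)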
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowercase = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''MLukeTokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
1
"""simple docstring""" def _snake_case ( snake_case__ : int ): if divisor % 5 == 0 or divisor % 2 == 0: return 0 A = 1 A = 1 while repunit: A = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def _snake_case ( snake_case__ : int = 100_0000 ): A = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(snake_case__ ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(F"""{solution() = }""")
91
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" def _snake_case ( snake_case__ : list[int] ): A = len(snake_case__ ) for i in range(snake_case__ ): for j in range(i + 1 , snake_case__ ): if numbers[j] < numbers[i]: A , A = numbers[j], numbers[i] return numbers if __name__ == "__main__": _lowercase = input('''Enter numbers separated by a comma:\n''').strip() _lowercase = [int(item) for item in user_input.split(''',''')] print(exchange_sort(unsorted))
91
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) 
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
91
1
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''ClapFeatureExtractor''' _lowerCamelCase: int = ('''RobertaTokenizer''', '''RobertaTokenizerFast''') def __init__( self : Dict ,A_ : Any ,A_ : Dict ) -> Tuple: super().__init__(A_ ,A_ ) def __call__( self : Tuple ,A_ : Any=None ,A_ : List[str]=None ,A_ : List[str]=None ,**A_ : Optional[Any] ) -> Dict: A = kwargs.pop('sampling_rate' ,A_ ) if text is None and audios is None: raise ValueError('You have to specify either text or audios. Both cannot be none.' ) if text is not None: A = self.tokenizer(A_ ,return_tensors=A_ ,**A_ ) if audios is not None: A = self.feature_extractor( A_ ,sampling_rate=A_ ,return_tensors=A_ ,**A_ ) if text is not None and audios is not None: A = audio_features.input_features return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A_ ) ,tensor_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,*A_ : int ,**A_ : Union[str, Any] ) -> Optional[int]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,*A_ : Optional[Any] ,**A_ : Optional[int] ) -> List[Any]: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: A = self.tokenizer.model_input_names A = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
91
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" import re import string import numpy as np import datasets _lowercase = ''' Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list. ''' _lowercase = ''' Args: predictions: List of predicted texts. references: List of reference texts. regexes_to_ignore: List, defaults to None. Regex expressions of characters to ignore when calculating the exact matches. Note: these regexes are removed from the input data before the changes based on the options below (e.g. ignore_case, ignore_punctuation, ignore_numbers) are applied. ignore_case: Boolean, defaults to False. If true, turns everything to lowercase so that capitalization differences are ignored. ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before comparing predictions and references. Returns: exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive. Examples: >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 25.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 50.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True) >>> print(round(results["exact_match"], 1)) 75.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["the cat", "theater", "YELLING", "agent007"] >>> preds = ["cat?", "theater", "yelling", "agent"] >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True) >>> print(round(results["exact_match"], 1)) 100.0 >>> exact_match = datasets.load_metric("exact_match") >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."] >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."] >>> results = exact_match.compute(references=refs, predictions=preds) >>> print(round(results["exact_match"], 1)) 33.3 ''' _lowercase = ''' ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Value('string' ,id='sequence' ), 'references': datasets.Value('string' ,id='sequence' ), } ) ,reference_urls=[] ,) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ,A_ : List[Any] ,A_ : int=None ,A_ : List[Any]=False 
,A_ : str=False ,A_ : Dict=False ,) -> List[Any]: if regexes_to_ignore is not None: for s in regexes_to_ignore: A = np.array([re.sub(A_ ,'' ,A_ ) for x in predictions] ) A = np.array([re.sub(A_ ,'' ,A_ ) for x in references] ) else: A = np.asarray(A_ ) A = np.asarray(A_ ) if ignore_case: A = np.char.lower(A_ ) A = np.char.lower(A_ ) if ignore_punctuation: A = string.punctuation.maketrans('' ,'' ,string.punctuation ) A = np.char.translate(A_ ,table=A_ ) A = np.char.translate(A_ ,table=A_ ) if ignore_numbers: A = string.digits.maketrans('' ,'' ,string.digits ) A = np.char.translate(A_ ,table=A_ ) A = np.char.translate(A_ ,table=A_ ) A = predictions == references return {"exact_match": np.mean(A_ ) * 100}
91
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
91
1
"""simple docstring""" def _snake_case ( snake_case__ : int = 1000 ): A = 2**power A = 0 while n: A , A = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
91
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
91
1
"""simple docstring""" from __future__ import annotations _lowercase = 1.6021e-19 # units = C def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float , ): if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif conductivity < 0: raise ValueError('Conductivity cannot be negative' ) elif electron_conc < 0: raise ValueError('Electron concentration cannot be negative' ) elif mobility < 0: raise ValueError('mobility cannot be negative' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
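The fairseq_offset bookkeeping above shifts every SentencePiece id by 1 and pins the four control tokens, so the exported vocabulary matches fairseq's layout. A quick sanity check against the released checkpoint (network access and the sentencepiece package assumed):
from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
assert tok.convert_tokens_to_ids("<s>") == 0    # fairseq-aligned, not the raw spm id (1)
assert tok.convert_tokens_to_ids("<unk>") == 3  # spm itself reserves id 0 for <unk>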
91
1
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to properly calculate the metrics on the # validation dataset when in a distributed system, and builds off the # `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowercase = 16 _lowercase = 32 def _snake_case ( snake_case__ : Accelerator , snake_case__ : int = 16 ): A = AutoTokenizer.from_pretrained('bert-base-cased' ) A = load_dataset('glue' , 'mrpc' ) def tokenize_function(snake_case__ : Optional[Any] ): # max_length=None => use the model max length (it's actually the default) A = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case__ , max_length=snake_case__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A = datasets.map( snake_case__ , batched=snake_case__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(snake_case__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. A = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A = 16 elif accelerator.mixed_precision != "no": A = 8 else: A = None return tokenizer.pad( snake_case__ , padding='longest' , max_length=snake_case__ , pad_to_multiple_of=snake_case__ , return_tensors='pt' , ) # Instantiate dataloaders. 
A = DataLoader( tokenized_datasets['train'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) A = DataLoader( tokenized_datasets['validation'] , shuffle=snake_case__ , collate_fn=snake_case__ , batch_size=snake_case__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowercase = mocked_dataloaders # noqa: F811 def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : List[Any] ): # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS' , snake_case__ ) == "1": A = 2 # Initialize accelerator A = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A = config['lr'] A = int(config['num_epochs'] ) A = int(config['seed'] ) A = int(config['batch_size'] ) A = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation A = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A = batch_size // MAX_GPU_BATCH_SIZE A = MAX_GPU_BATCH_SIZE set_seed(snake_case__ ) A , A = get_dataloaders(snake_case__ , snake_case__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=snake_case__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A = model.to(accelerator.device ) # Instantiate optimizer A = AdamW(params=model.parameters() , lr=snake_case__ ) # Instantiate scheduler A = get_linear_schedule_with_warmup( optimizer=snake_case__ , num_warmup_steps=100 , num_training_steps=(len(snake_case__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A , A , A , A , A = accelerator.prepare( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # Now we train the model for epoch in range(snake_case__ ): model.train() for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A = model(**snake_case__ ) A = outputs.loss A = loss / gradient_accumulation_steps accelerator.backward(snake_case__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() A = 0 for step, batch in enumerate(snake_case__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A = model(**snake_case__ ) A = outputs.logits.argmax(dim=-1 ) A , A = accelerator.gather((predictions, batch['labels']) ) # New Code # # First we check if it's a distributed system if accelerator.use_distributed: # Then see if we're on the last batch of our eval dataloader if step == len(snake_case__ ) - 1: # Last batch needs to be truncated on distributed systems as it contains additional samples A = predictions[: len(eval_dataloader.dataset ) - samples_seen] A = references[: len(eval_dataloader.dataset ) - samples_seen] else: # Otherwise we add the number of samples seen samples_seen += references.shape[0] # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`: # accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=snake_case__ , references=snake_case__ , ) A = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , snake_case__ ) def _snake_case ( ): A = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=snake_case__ , default=snake_case__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) A = parser.parse_args() A = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(snake_case__ , snake_case__ ) if __name__ == "__main__": main()
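As the comment in the eval loop notes, the manual last-batch truncation is exactly what Accelerator.gather_for_metrics automates. A sketch of the collapsed loop body:
# inside the eval loop, replacing the use_distributed bookkeeping:
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(predictions=predictions, references=references)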
91
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" def _snake_case ( snake_case__ : int ): assert ( isinstance(snake_case__ , snake_case__ ) and number_of_steps > 0 ), F'number_of_steps needs to be positive integer, your input {number_of_steps}' if number_of_steps == 1: return 1 A , A = 1, 1 for _ in range(number_of_steps - 1 ): A , A = current + previous, current return current if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
91
1
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: CommonSchedulerState # setable values _lowerCamelCase: jnp.ndarray _lowerCamelCase: jnp.ndarray _lowerCamelCase: Optional[int] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : str ,A_ : CommonSchedulerState ,A_ : jnp.ndarray ,A_ : jnp.ndarray ) -> Union[str, Any]: return cls(common=A_ ,init_noise_sigma=A_ ,timesteps=A_ ) @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: DDPMSchedulerState class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: Tuple = [e.name for e in FlaxKarrasDiffusionSchedulers] _lowerCamelCase: jnp.dtype @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: return True @register_to_config def __init__( self : Optional[Any] ,A_ : int = 1000 ,A_ : float = 0.00_01 ,A_ : float = 0.02 ,A_ : str = "linear" ,A_ : Optional[jnp.ndarray] = None ,A_ : str = "fixed_small" ,A_ : bool = True ,A_ : str = "epsilon" ,A_ : jnp.dtype = jnp.floataa ,) -> Union[str, Any]: A = dtype def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState: if common is None: A = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution A = jnp.array(1.0 ,dtype=self.dtype ) A = jnp.arange(0 ,self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=A_ ,init_noise_sigma=A_ ,timesteps=A_ ,) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : DDPMSchedulerState ,A_ : jnp.ndarray ,A_ : Optional[int] = None ) -> jnp.ndarray: return sample def _SCREAMING_SNAKE_CASE ( self : int ,A_ : DDPMSchedulerState ,A_ : int ,A_ : Tuple = () ) -> DDPMSchedulerState: A = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 A = (jnp.arange(0 ,A_ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=A_ ,timesteps=A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : DDPMSchedulerState ,A_ : Optional[Any] ,A_ : str=None ,A_ : Optional[Any]=None ) -> Optional[int]: A = state.common.alphas_cumprod[t] A = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample A = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: A = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": A = jnp.clip(A_ ,a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": A = jnp.log(jnp.clip(A_ ,a_min=1e-20 ) ) elif variance_type == "fixed_large": A = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log A = jnp.log(state.common.betas[t] ) elif 
variance_type == "learned": return predicted_variance elif variance_type == "learned_range": A = variance A = state.common.betas[t] A = (predicted_variance + 1) / 2 A = frac * max_log + (1 - frac) * min_log return variance def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : DDPMSchedulerState ,A_ : jnp.ndarray ,A_ : int ,A_ : jnp.ndarray ,A_ : Optional[jax.random.KeyArray] = None ,A_ : bool = True ,) -> Union[FlaxDDPMSchedulerOutput, Tuple]: A = timestep if key is None: A = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: A , A = jnp.split(A_ ,sample.shape[1] ,axis=1 ) else: A = None # 1. compute alphas, betas A = state.common.alphas_cumprod[t] A = jnp.where(t > 0 ,state.common.alphas_cumprod[t - 1] ,jnp.array(1.0 ,dtype=self.dtype ) ) A = 1 - alpha_prod_t A = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": A = model_output elif self.config.prediction_type == "v_prediction": A = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' ' for the FlaxDDPMScheduler.' ) # 3. Clip "predicted x_0" if self.config.clip_sample: A = jnp.clip(A_ ,-1 ,1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t A = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise def random_variance(): A = jax.random.split(A_ ,num=1 ) A = jax.random.normal(A_ ,shape=model_output.shape ,dtype=self.dtype ) return (self._get_variance(A_ ,A_ ,predicted_variance=A_ ) ** 0.5) * noise A = jnp.where(t > 0 ,random_variance() ,jnp.zeros(model_output.shape ,dtype=self.dtype ) ) A = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=A_ ,state=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : DDPMSchedulerState ,A_ : jnp.ndarray ,A_ : jnp.ndarray ,A_ : jnp.ndarray ,) -> jnp.ndarray: return add_noise_common(state.common ,A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : DDPMSchedulerState ,A_ : jnp.ndarray ,A_ : jnp.ndarray ,A_ : jnp.ndarray ,) -> jnp.ndarray: return get_velocity_common(state.common ,A_ ,A_ ,A_ ) def __len__( self : Any ) -> List[str]: return self.config.num_train_timesteps
91
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
91
1
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': 
self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
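End to end, this builder backs the packaged csv loader, so standard usage goes through load_dataset, with extra keyword arguments flowing into CsvConfig:
from datasets import load_dataset

ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",")
print(ds["train"].features)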
91
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
91
1
"""simple docstring""" import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Optional[int] ,A_ : int=13 ,A_ : List[Any]=64 ,A_ : int=2 ,A_ : Tuple=3 ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Tuple=32 ,A_ : Tuple=5 ,A_ : str=4 ,A_ : List[str]=37 ,A_ : Optional[int]="gelu" ,A_ : Tuple=0.1 ,A_ : List[Any]=0.1 ,A_ : List[Any]=10 ,A_ : Optional[Any]=0.02 ,A_ : Dict=[1, 16, 4, 4] ,A_ : List[Any]=None ,) -> Any: A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = type_sequence_label_size A = initializer_range A = scope A = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size A = (self.image_size // 32) ** 2 A = num_patches + 1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: A = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [4, 8, 16, 32], 'num_groups': 2, } return ViTHybridConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,backbone_featmap_shape=self.backbone_featmap_shape ,backbone_config=A_ ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Tuple ,A_ : Optional[Any] ) -> Dict: A = ViTHybridModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ,A_ : Dict ,A_ : List[str] ) -> List[str]: A = self.type_sequence_label_size A = ViTHybridForImageClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ 
,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: A = self.prepare_config_and_inputs() A , A , A = config_and_inputs A = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () _lowerCamelCase: Dict = ( {'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification} if is_torch_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: Optional[int] = False _lowerCamelCase: Any = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = ViTHybridModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason='ViT does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: pass def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ ,nn.Linear ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = _config_zero_init(A_ ) for model_class in self.all_model_classes: A = model_class(config=A_ ) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": A = [F'{name}.{key}' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = ViTHybridModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def 
_SCREAMING_SNAKE_CASE ( self : int ) -> Any: A = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( A_ ) A = self.default_image_processor A = prepare_img() A = image_processor(images=A_ ,return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): A = model(**A_ ) # verify the logits A = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,A_ ) A = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) ) @slow @require_accelerate def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' ) A = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' ,device_map='auto' ) A = prepare_img() A = image_processor(images=A_ ,return_tensors='pt' ) A = model(**A_ ) A = outputs.logits # model predicts one of the 1000 ImageNet classes A = logits.argmax(-1 ).item() self.assertEqual(model.config.idalabel[predicted_class_idx] ,'tabby, tabby cat' )
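The slow tests above exercise ViT-Hybrid image classification end to end. A minimal standalone sketch of the same inference flow, assuming the google/vit-hybrid-base-bit-384 checkpoint named in the sample; the input image path is hypothetical:

import torch
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor

processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384')
model.eval()
image = Image.open('cat.png')  # hypothetical input image
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000): one score per ImageNet class
print(model.config.id2label[logits.argmax(-1).item()])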
91
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
91
1
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
91
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, 
*report_metric_keys] A = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
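The heart of the benchmark driver above is the cartesian product that expands the --variations dimensions into concrete runs. A small sketch reproducing that step with the example from the header comment:

import itertools

dims = [['--tf32 0', '--tf32 1'], ['', '--fp16', '--bf16']]
variations = [' '.join(p).strip() for p in itertools.product(*dims)]
# six runs, matching the header comment:
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']
print(variations)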
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, 
FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
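The module above hands _import_structure to HuggingFace's _LazyModule so the heavy torch/TF/Flax submodules are only imported when an exported name is first touched. An illustrative-only sketch of that general pattern (not the actual _LazyModule implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # only called when normal attribute lookup fails, i.e. before first use
        if attr not in self._name_to_module:
            raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')
        submodule = importlib.import_module('.' + self._name_to_module[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value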
91
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from 
{input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
91
1
"""simple docstring""" def _snake_case ( snake_case__ : int , snake_case__ : int ): if a < 0 or b < 0: raise ValueError('the value of both inputs must be positive' ) A = str(bin(snake_case__ ) )[2:] # remove the leading "0b" A = str(bin(snake_case__ ) )[2:] # remove the leading "0b" A = max(len(snake_case__ ) , len(snake_case__ ) ) return "0b" + "".join( str(int(char_a == '1' and char_b == '1' ) ) for char_a, char_b in zip(a_binary.zfill(snake_case__ ) , b_binary.zfill(snake_case__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, 
-0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 
,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 
0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
91
1
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
1
"""simple docstring""" import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ,A_ : List[str]=2 ,A_ : Optional[Any]=32 ,A_ : Union[str, Any]=16 ,A_ : Dict=3 ,A_ : Optional[Any]=True ,A_ : Optional[int]=True ,A_ : Optional[int]=32 ,A_ : List[Any]=4 ,A_ : Dict=[0, 1, 2, 3] ,A_ : str=4 ,A_ : List[Any]=37 ,A_ : Dict="gelu" ,A_ : Optional[Any]=0.1 ,A_ : Tuple=0.1 ,A_ : List[Any]=0.02 ,A_ : List[str]=3 ,A_ : Tuple=[1, 384, 24, 24] ,A_ : List[str]=True ,A_ : Union[str, Any]=None ,) -> Any: A = parent A = batch_size A = image_size A = patch_size A = num_channels A = is_training A = use_labels A = hidden_size A = num_hidden_layers A = backbone_out_indices A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = initializer_range A = num_labels A = backbone_featmap_shape A = scope A = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) A = (image_size // patch_size) ** 2 A = num_patches + 1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) A = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = { 'global_padding': 'same', 'layer_type': 'bottleneck', 'depths': [3, 4, 9], 'out_features': ['stage1', 'stage2', 'stage3'], 'embedding_dynamic_padding': True, 'hidden_sizes': [96, 192, 384, 768], 'num_groups': 2, } return DPTConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=A_ ,backbone_featmap_shape=self.backbone_featmap_shape ,) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ) -> List[str]: A = DPTModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Any ,A_ : int ,A_ : Tuple ) -> Dict: A = self.num_labels A = DPTForDepthEstimation(A_ ) model.to(A_ ) model.eval() A = model(A_ ) 
self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : str ,A_ : int ) -> List[str]: A = self.num_labels A = DPTForSemanticSegmentation(A_ ) model.to(A_ ) model.eval() A = model(A_ ,labels=A_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: A = self.prepare_config_and_inputs() A , A , A = config_and_inputs A = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () _lowerCamelCase: Optional[Any] = ( { '''depth-estimation''': DPTForDepthEstimation, '''feature-extraction''': DPTModel, '''image-segmentation''': DPTForSemanticSegmentation, } if is_torch_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: Dict = False _lowerCamelCase: str = False def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: A = DPTModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: self.config_tester.run_common_tests() @unittest.skip(reason='DPT does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) A = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ ,nn.Linear ) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue A , A = self.model_tester.prepare_config_and_inputs_for_common() A = True if model_class in get_values(A_ ): continue A = model_class(A_ ) model.to(A_ ) model.train() A = self._prepare_for_class(A_ ,A_ ,return_labels=A_ ) A = model(**A_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue A , A = self.model_tester.prepare_config_and_inputs_for_common() A = False A = True if model_class in get_values(A_ ) or not model_class.supports_gradient_checkpointing: continue A = model_class(A_ ) model.to(A_ ) 
model.gradient_checkpointing_enable() model.train() A = self._prepare_for_class(A_ ,A_ ,return_labels=A_ ) A = model(**A_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = _config_zero_init(A_ ) for model_class in self.all_model_classes: A = model_class(config=A_ ) # Skip the check for the backbone A = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": A = [F'{name}.{key}' for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: pass @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: A = DPTModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 'add' with self.assertRaises(A_ ): A = DPTForDepthEstimation(A_ ) def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: A = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' ) A = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(A_ ) A = prepare_img() A = image_processor(images=A_ ,return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): A = model(**A_ ) A = outputs.predicted_depth # verify the predicted depth A = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape ,A_ ) A = torch.tensor( [[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(A_ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 ,A_ ,atol=1e-4 ) )
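The slow test above checks a 3x3 patch of the predicted depth map. A minimal inference sketch with the same Intel/dpt-hybrid-midas checkpoint named in the sample; the image path is hypothetical:

import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas')
model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas')
model.eval()
image = Image.open('room.jpg')  # hypothetical input image
inputs = processor(images=image, return_tensors='pt')
with torch.no_grad():
    depth = model(**inputs).predicted_depth  # (1, 384, 384) relative inverse-depth map
print(depth.shape)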
91
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def _snake_case ( ): A = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A = get_sagemaker_input() else: A = get_cluster_input() return config def _snake_case ( snake_case__ : Any=None ): if subparsers is not None: A = subparsers.add_parser('config' , description=snake_case__ ) else: A = argparse.ArgumentParser('Accelerate config command' , description=snake_case__ ) parser.add_argument( '--config_file' , default=snake_case__ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def _snake_case ( snake_case__ : Tuple ): A = get_user_input() if args.config_file is not None: A = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) A = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F'accelerate configuration saved at {config_file}' ) def _snake_case ( ): A = config_command_parser() A = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
91
1
"""simple docstring""" def _snake_case ( ): for n in range(1 , 100_0000 ): yield n * (n + 1) // 2 def _snake_case ( snake_case__ : List[Any] ): A = 1 A = 2 while i * i <= n: A = 0 while n % i == 0: n //= i multiplicity += 1 divisors_count *= multiplicity + 1 i += 1 if n > 1: divisors_count *= 2 return divisors_count def _snake_case ( ): return next(i for i in triangle_number_generator() if count_divisors(snake_case__ ) > 500 ) if __name__ == "__main__": print(solution())
91
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]: A = LlamaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]: A = True A = LlamaModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) 
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]: A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]: A = True A = True A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else () _lowerCamelCase: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = LlamaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 
).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ids_tensor([1, 10] ,config.vocab_size ) A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() A = original_model(A_ ).last_hidden_state A = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = {'type': scaling_type, 'factor': 10.0} A = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() A = scaled_model(A_ ).last_hidden_state A = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!'
) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' ) A = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabilities somehow, will update!
Also it is gonna be a `too_slow` test' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) A = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Model is currently gated' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' A = 'Simply put, the theory of relativity states that ' A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) A = tokenizer.encode(A_ ,return_tensors='pt' ) A = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ ) # greedy generation outputs A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ ) A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ )
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase = { '''configuration_m2m_100''': ['''M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''M2M100Config''', '''M2M100OnnxConfig'''], '''tokenization_m2m_100''': ['''M2M100Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST''', '''M2M100ForConditionalGeneration''', '''M2M100Model''', '''M2M100PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig from .tokenization_mam_aaa import MaMaaaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mam_aaa import ( M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
1
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : str ): # noqa: E741 while r - l > 1: A = (l + r) // 2 if v[m] >= key: A = m else: A = m # noqa: E741 return r def _snake_case ( snake_case__ : list[int] ): if len(snake_case__ ) == 0: return 0 A = [0] * len(snake_case__ ) A = 1 A = v[0] for i in range(1 , len(snake_case__ ) ): if v[i] < tail[0]: A = v[i] elif v[i] > tail[length - 1]: A = v[i] length += 1 else: A = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
1
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar _lowercase = TypeVar('''_T''') class lowerCAmelCase_ ( Generic[_T] ): '''simple docstring''' def __init__( self : List[str] ,A_ : Iterable[_T] | None = None ) -> None: A = list(iterable or [] ) A = [] def __len__( self : Optional[Any] ) -> int: return len(self._stacka ) + len(self._stacka ) def __repr__( self : Tuple ) -> str: return F'Queue({tuple(self._stacka[::-1] + self._stacka )})' def _SCREAMING_SNAKE_CASE ( self : str ,A_ : _T ) -> None: self._stacka.append(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> _T: A = self._stacka.pop A = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError('Queue is empty' ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, 
FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments _lowercase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[float] = field( default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} ) _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''Whether to SortishSamler or not.'''} ) _lowerCamelCase: bool = field( default=_lowercase , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} ) _lowerCamelCase: bool = field(default=_lowercase , metadata={'''help''': '''whether to use adafactor'''} ) _lowerCamelCase: Optional[float] = field( default=_lowercase , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} ) _lowerCamelCase: Optional[float] = field( default=_lowercase , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} ) _lowerCamelCase: Optional[float] = field(default=_lowercase , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} ) _lowerCamelCase: Optional[float] = field( default=_lowercase , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} ) _lowerCamelCase: Optional[str] = field( default='''linear''' , metadata={'''help''': F'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , )
91
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': 
self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
91
1
"""simple docstring""" from math import factorial def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : float ): if successes > trials: raise ValueError('successes must be lower or equal to trials' ) if trials < 0 or successes < 0: raise ValueError('the function is defined for non-negative integers' ) if not isinstance(snake_case__ , snake_case__ ) or not isinstance(snake_case__ , snake_case__ ): raise ValueError('the function is defined for non-negative integers' ) if not 0 < prob < 1: raise ValueError('prob has to be in range of 1 - 0' ) A = (prob**successes) * ((1 - prob) ** (trials - successes)) # Calculate the binomial coefficient: n! / k!(n-k)! A = float(factorial(snake_case__ ) ) coefficient /= factorial(snake_case__ ) * factorial(trials - successes ) return probability * coefficient if __name__ == "__main__": from doctest import testmod testmod() print('''Probability of 2 successes out of 4 trails''') print('''with probability of 0.75 is:''', end=''' ''') print(binomial_distribution(2, 4, 0.75))
91
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''prophetnet.tokenizer'''} _lowercase = { '''vocab_file''': { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer''' ), } } _lowercase = { '''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False}, } _lowercase = { '''microsoft/xprophetnet-large-wiki100-cased''': 5_12, } def _snake_case ( snake_case__ : Optional[Any] ): A = collections.OrderedDict() with open(snake_case__ , 'r' , encoding='utf-8' ) as reader: A = reader.readlines() for index, token in enumerate(snake_case__ ): A = token.rstrip('\n' ) A = index return vocab class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = VOCAB_FILES_NAMES _lowerCamelCase: Tuple = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask'''] def __init__( self : int ,A_ : List[str] ,A_ : Dict="[SEP]" ,A_ : List[str]="[SEP]" ,A_ : Optional[int]="[SEP]" ,A_ : str="[UNK]" ,A_ : str="[PAD]" ,A_ : Dict="[CLS]" ,A_ : Optional[Any]="[MASK]" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[Any] ,) -> None: A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,sep_token=A_ ,unk_token=A_ ,pad_token=A_ ,cls_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab A = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4} for i in range(10 ): A = F'[unused{i}]' A = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab A = 12 A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(A_ ) def __getstate__( self : Tuple ) -> List[str]: A = self.__dict__.copy() A = None return state def __setstate__( self : Dict ,A_ : int ) -> str: A = d try: import sentencepiece as spm except ImportError: logger.warning( 'You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece' ' pip install sentencepiece' ) raise # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return ([0] * len(A_ )) + [1] return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: return len(self.sp_model ) + self.fairseq_offset def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : str ) -> str: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ) -> int: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[str] ) -> str: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Union[str, Any] ) -> Tuple: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.sep_token_id] A = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
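The alignment table in the comment above pins down the offset arithmetic: fairseq ids 0-4 hold the named special tokens, ids 5-14 hold the ten [unused] slots, and every real sentencepiece id is shifted by fairseq_offset = 12, so the first real spm token ',' (spm id 3) lands at embedding position 15. A small sketch of the same mapping (the names mirror the attributes used by _convert_token_to_id above, but this is a restatement, not the class itself):

fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}
fairseq_offset = 12

def token_to_id(token: str, spm_id: int) -> int:
    # special tokens keep their fixed ids; spm ids shift past the reserved slots
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["[UNK]"]

assert token_to_id(",", 3) == 15  # matches the vocab-alignment comment
assert token_to_id("[PAD]", 0) == 0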
91
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) 
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of 
onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
91
1
"""simple docstring""" from datetime import datetime as dt import os from github import Github _lowercase = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def _snake_case ( ): A = Github(os.environ['GITHUB_TOKEN'] ) A = g.get_repo('huggingface/transformers' ) A = repo.get_issues(state='open' ) for issue in open_issues: A = sorted([comment for comment in issue.get_comments()] , key=lambda snake_case__ : i.created_at , reverse=snake_case__ ) A = comments[0] if len(snake_case__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='closed' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
91
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
91
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
91
1
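The BlenderbotSmall tokenizer test above expects 'apte' to split into ['ap@@', 'te'] given the toy merges ['a p', 't e</w>', 'ap t</w>', ...]. That follows from greedy lowest-rank BPE merging; a minimal sketch of that merge loop (illustrative only: the function name, the simplified end-of-word handling, and the '@@' marking are mine, not the library's actual algorithm, which also lowercases and pre-splits with a regex):

def toy_bpe(word, ranks):
    # split into characters, marking the last one with the end-of-word suffix
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    while len(symbols) > 1:
        # rank every adjacent pair; unseen pairs get infinite rank
        pairs = [(ranks.get(p, float("inf")), i) for i, p in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs)
        if best_rank == float("inf"):
            break  # no merge applies any more
        symbols[i : i + 2] = [symbols[i] + symbols[i + 1]]
    # non-final subwords are marked with '@@', the end-of-word suffix is dropped
    return [s[: -len("</w>")] if s.endswith("</w>") else s + "@@" for s in symbols]

ranks = {("a", "p"): 0, ("t", "e</w>"): 1, ("ap", "t</w>"): 2,
         ("a", "d"): 3, ("ad", "apt</w>"): 4, ("a", "c"): 5, ("ac", "t</w>"): 6}
print(toy_bpe("apte", ranks))   # ['ap@@', 'te'], matching the expected tokens in the test
print(toy_bpe("adapt", ranks))  # ['adapt']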
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _lowercase = logging.get_logger(__name__) _lowercase = {'''vocab_file''': '''spiece.model'''} _lowercase = { '''vocab_file''': { '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''', } } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Dict ,A_ : List[Any] ,A_ : Union[str, Any]=False ,A_ : str=True ,A_ : Tuple=False ,A_ : Optional[int]="<s>" ,A_ : Tuple="</s>" ,A_ : Tuple="<unk>" ,A_ : Dict="<sep>" ,A_ : Optional[int]="<pad>" ,A_ : Optional[int]="<cls>" ,A_ : Any="<mask>" ,A_ : Optional[Any]=["<eop>", "<eod>"] ,A_ : Optional[Dict[str, Any]] = None ,**A_ : int ,) -> None: A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=A_ ,remove_space=A_ ,keep_accents=A_ ,bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,pad_token=A_ ,cls_token=A_ ,mask_token=A_ ,additional_special_tokens=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = 3 A = do_lower_case A = remove_space A = keep_accents A = vocab_file A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(A_ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( 'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. ' 'See https://pypi.org/project/jieba/ for installation.' ) A = jieba A = str.maketrans(' \n' ,'\u2582\u2583' ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return len(self.sp_model ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Union[str, Any] ) -> Dict: A = self.__dict__.copy() A = None return state def __setstate__( self : Optional[Any] ,A_ : int ) -> Any: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> Optional[Any]: if self.remove_space: A = ' '.join(inputs.strip().split() ) else: A = inputs A = outputs.replace('``' ,'"' ).replace('\'\'' ,'"' ) if not self.keep_accents: A = unicodedata.normalize('NFKD' ,A_ ) A = ''.join([c for c in outputs if not unicodedata.combining(A_ )] ) if self.do_lower_case: A = outputs.lower() return outputs def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ) -> List[str]: A = self.preprocess_text(A_ ) A = self.sp_model.encode(A_ ,out_type=A_ ) A = [] for piece in pieces: if len(A_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): A = self.sp_model.EncodeAsPieces(piece[:-1].replace(A_ ,'' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: A = cur_pieces[1:] else: A = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(A_ ) else: new_pieces.append(A_ ) return new_pieces def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ) -> Any: return self.sp_model.PieceToId(A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ) -> Optional[Any]: return 
self.sp_model.IdToPiece(A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> Optional[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is not None: return ([0] * len(A_ )) + [1] + ([0] * len(A_ )) + [1, 1] return ([0] * len(A_ )) + [1, 1] def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,) def _SCREAMING_SNAKE_CASE ( self : Dict ,*A_ : Optional[int] ,**A_ : Tuple ) -> Optional[int]: A = super()._decode(*A_ ,**A_ ) A = text.replace(' ' ,'' ).replace('\u2582' ,' ' ).replace('\u2583' ,'\n' ) return text
91
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
91
1
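The CPM tokenizer at the start of this row pre-segments with jieba and maps spaces and newlines to the placeholders \u2582 and \u2583 (via `str.maketrans(' \n', '\u2582\u2583')`) so they survive SentencePiece; `_decode` reverses the mapping (after first stripping the spaces SentencePiece inserts). A minimal round-trip sketch of just that translation step, with illustrative names:

translator = str.maketrans(" \n", "\u2582\u2583")

def encode_whitespace(text: str) -> str:
    # spaces/newlines become printable placeholder characters
    return text.translate(translator)

def decode_whitespace(text: str) -> str:
    # inverse mapping, as applied at the end of `_decode` above
    return text.replace("\u2582", " ").replace("\u2583", "\n")

sample = "hello world\nsecond line"
assert decode_whitespace(encode_whitespace(sample)) == sample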
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''XGLMTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''XGLMTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XGLMForCausalLM''', '''XGLMModel''', '''XGLMPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxXGLMForCausalLM''', '''FlaxXGLMModel''', '''FlaxXGLMPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXGLMForCausalLM''', '''TFXGLMModel''', '''TFXGLMPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
91
1
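The XGLM `__init__` at the start of this row wires `_import_structure` into transformers' `_LazyModule`, so heavy submodules are imported only on first attribute access. Outside transformers, PEP 562 module-level `__getattr__` gives the same effect; a minimal sketch of the idea (my own illustration with a hypothetical package layout, not transformers' implementation):

# mypkg/__init__.py -- lazy attribute loading via PEP 562
import importlib

_import_structure = {"tokenization_xglm": ["XGLMTokenizer"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    # the submodule is imported only when the attribute is first requested
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)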
"""simple docstring""" from copy import deepcopy class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[Any] ,A_ : list[int] | None = None ,A_ : int | None = None ) -> None: if arr is None and size is not None: A = size A = [0] * size elif arr is not None: self.init(A_ ) else: raise ValueError('Either arr or size must be specified' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : list[int] ) -> None: A = len(A_ ) A = deepcopy(A_ ) for i in range(1 ,self.size ): A = self.next_(A_ ) if j < self.size: self.tree[j] += self.tree[i] def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> list[int]: A = self.tree[:] for i in range(self.size - 1 ,0 ,-1 ): A = self.next_(A_ ) if j < self.size: arr[j] -= arr[i] return arr @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : int ) -> int: return index + (index & (-index)) @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : int ) -> int: return index - (index & (-index)) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ,A_ : int ) -> None: if index == 0: self.tree[0] += value return while index < self.size: self.tree[index] += value A = self.next_(A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ,A_ : int ) -> None: self.add(A_ ,value - self.get(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> int: if right == 0: return 0 A = self.tree[0] right -= 1 # make right inclusive while right > 0: result += self.tree[right] A = self.prev(A_ ) return result def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ,A_ : int ) -> int: return self.prefix(A_ ) - self.prefix(A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : int ) -> int: return self.query(A_ ,index + 1 ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : int ) -> int: value -= self.tree[0] if value < 0: return -1 A = 1 # Largest power of 2 <= size while j * 2 < self.size: j *= 2 A = 0 while j > 0: if i + j < self.size and self.tree[i + j] <= value: value -= self.tree[i + j] i += j j //= 2 return i if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
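A short usage check for the binary indexed tree in this row, using the names from the re-indented listing (including the `get_array` stand-in chosen there): `prefix(r)` sums arr[0:r], `query(l, r)` sums arr[l:r], and `add` applies a point update.

ft = lowerCAmelCase_(arr=[1, 2, 3, 4, 5])
assert ft.prefix(3) == 1 + 2 + 3        # sum of arr[0:3]
assert ft.query(1, 4) == 2 + 3 + 4      # sum of arr[1:4]
ft.add(2, 10)                           # arr[2] += 10
assert ft.get_array() == [1, 2, 13, 4, 5]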
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''pytorch''', '''script''': '''run_ddp.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf_dist.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.p3.16xlarge''', '''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7}, }, ] ) class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: if self.framework == "pytorch": subprocess.run( F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() ,encoding='utf-8' ,check=A_ ,) assert hasattr(self ,'env' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> Any: A = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}' # distributed data settings A = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None # creates estimator return HuggingFace( entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=A_ ,instance_count=A_ ,instance_type=self.instance_type ,debugger_hook_config=A_ ,hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,distribution=A_ ,py_version='py36' ,) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: TrainingJobAnalytics(A_ ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(2,)] ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> str: # create estimator A = self.create_estimator(A_ ) # run training estimator.fit() # result dataframe A = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] ) A = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping A = ( Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy ) assert all(t <= self.results['eval_loss'] for t in eval_loss ) # dump tests result into json file to share in PR with 
open(F'{estimator.latest_training_job.name}.json' ,'w' ) as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,A_ )
91
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
91
1
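A quick check of the activation factory above (assumes torch is installed; `_snake_case` is the dump's name for the getter):

import torch
from torch import nn

act = _snake_case("gelu")
assert isinstance(act, nn.GELU)
assert act(torch.zeros(2, 3)).shape == (2, 3)  # GELU is elementwise, so the shape is preserved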
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def _snake_case ( snake_case__ : Tuple ): return 1 / (1 + np.exp(-z )) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Tuple ): return (-y * np.log(snake_case__ ) - (1 - y) * np.log(1 - h )).mean() def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ): A = np.dot(snake_case__ , snake_case__ ) return np.sum(y * scores - np.log(1 + np.exp(snake_case__ ) ) ) def _snake_case ( snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : List[Any]=7_0000 ): A = np.zeros(x.shape[1] ) for iterations in range(snake_case__ ): A = np.dot(snake_case__ , snake_case__ ) A = sigmoid_function(snake_case__ ) A = np.dot(x.T , h - y ) / y.size A = theta - alpha * gradient # updating the weights A = np.dot(snake_case__ , snake_case__ ) A = sigmoid_function(snake_case__ ) A = cost_function(snake_case__ , snake_case__ ) if iterations % 100 == 0: print(F'loss: {j} \t' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": _lowercase = datasets.load_iris() _lowercase = iris.data[:, :2] _lowercase = (iris.target != 0) * 1 _lowercase = 0.1 _lowercase = logistic_reg(alpha, x, y, max_iterations=7_00_00) print('''theta: ''', theta) # printing the theta i.e our weights vector def _snake_case ( snake_case__ : Optional[int] ): return sigmoid_function( np.dot(snake_case__ , snake_case__ ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''') ((_lowercase) , (_lowercase)) = (x[:, 0].min(), x[:, 0].max()) ((_lowercase) , (_lowercase)) = (x[:, 1].min(), x[:, 1].max()) ((_lowercase) , (_lowercase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) _lowercase = np.c_[xxa.ravel(), xxa.ravel()] _lowercase = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''') plt.legend() plt.show()
91
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
91
1
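The training loop in the logistic-regression sample at the start of this row is plain batch gradient descent on the log-loss: theta <- theta - alpha * X^T (sigmoid(X theta) - y) / n. A self-contained toy run of the same update (data, seed, and names here are illustrative, not from the source):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 2))
y = (x[:, 0] + x[:, 1] > 0).astype(float)  # linearly separable toy labels

theta = np.zeros(2)
for _ in range(1000):
    h = 1 / (1 + np.exp(-x @ theta))        # sigmoid_function
    theta -= 0.1 * x.T @ (h - y) / y.size   # one gradient step
pred = (1 / (1 + np.exp(-x @ theta))) > 0.5
print((pred == y).mean())  # close to 1.0 on separable data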
"""simple docstring""" from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as 
megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
91
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
91
1
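The `offline()` context manager used in the tests above simulates the three failure modes named by `OfflineSimulationMode`. A generic sketch of the same idea, patching `requests.Session.request` with `unittest.mock` (my own illustration, not datasets' actual mechanism):

from unittest import mock

import requests


def _fail(*args, **kwargs):
    raise requests.ConnectionError("offline simulation")


# every call routed through requests.Session.request now fails without touching the network
with mock.patch("requests.Session.request", _fail):
    try:
        requests.get("https://huggingface.co")
    except requests.ConnectionError as e:
        print("blocked:", e)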
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) _lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name _lowercase = ''' Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ... ) >>> pipe_prior.to("cuda") >>> prompt = "A red cartoon frog, 4k" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ... ) >>> pipe.to("cuda") >>> init_image = load_image( ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" ... "/kandinsky/frog.png" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save("red_frog.png") ``` ''' def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : str=8 ): A = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 A = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int]=512 , snake_case__ : Tuple=512 ): A = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) A = np.array(pil_image.convert('RGB' ) ) A = arr.astype(np.floataa ) / 127.5 - 1 A = np.transpose(snake_case__ , [2, 0, 1] ) A = torch.from_numpy(snake_case__ ).unsqueeze(0 ) return image class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : UNetaDConditionModel ,A_ : DDPMScheduler ,A_ : VQModel ,) -> Union[str, Any]: super().__init__() self.register_modules( unet=A_ ,scheduler=A_ ,movq=A_ ,) A = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : Tuple ,A_ : Tuple ) -> List[Any]: # get the original timestep using init_timestep A = min(int(num_inference_steps * strength ) ,A_ ) A = max(num_inference_steps - init_timestep ,0 ) A = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Tuple ,A_ : Optional[int] ,A_ : str ,A_ : Union[str, Any] ,A_ : Optional[Any] ,A_ : Union[str, Any]=None ) -> List[Any]: if not isinstance(A_ ,(torch.Tensor, PIL.Image.Image, list) ): raise ValueError( F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(A_ )}' ) A = image.to(device=A_ ,dtype=A_ ) A = batch_size * num_images_per_prompt if image.shape[1] == 4: A = image else: if isinstance(A_ ,A_ ) and len(A_ ) != batch_size: raise ValueError( F'You have passed a list of generators of length {len(A_ )}, but requested an effective batch' F' size of {batch_size}. 
Make sure the batch size matches the length of the generators.' ) elif isinstance(A_ ,A_ ): A = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A_ ) ] A = torch.cat(A_ ,dim=0 ) else: A = self.movq.encode(A_ ).latent_dist.sample(A_ ) A = self.movq.config.scaling_factor * init_latents A = torch.cat([init_latents] ,dim=0 ) A = init_latents.shape A = randn_tensor(A_ ,generator=A_ ,device=A_ ,dtype=A_ ) # get latents A = self.scheduler.add_noise(A_ ,A_ ,A_ ) A = init_latents return latents def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : str=0 ) -> Optional[Any]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) A = torch.device(F'cuda:{gpu_id}' ) A = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple=0 ) -> List[Any]: if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) A = torch.device(F'cuda:{gpu_id}' ) if self.device.type != "cpu": self.to('cpu' ,silence_dtype_warnings=A_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) A = None for cpu_offloaded_model in [self.unet, self.movq]: A , A = cpu_offload_with_hook(A_ ,A_ ,prev_module_hook=A_ ) # We'll offload the last model manually. A = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: if not hasattr(self.unet ,'_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(A_ ,'_hf_hook' ) and hasattr(module._hf_hook ,'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(A_ ) def __call__( self : Optional[Any] ,A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,A_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,A_ : int = 512 ,A_ : int = 512 ,A_ : int = 100 ,A_ : float = 4.0 ,A_ : float = 0.3 ,A_ : int = 1 ,A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,A_ : Optional[str] = "pil" ,A_ : bool = True ,) -> List[Any]: A = self._execution_device A = guidance_scale > 1.0 if isinstance(A_ ,A_ ): A = torch.cat(A_ ,dim=0 ) A = image_embeds.shape[0] if isinstance(A_ ,A_ ): A = torch.cat(A_ ,dim=0 ) if do_classifier_free_guidance: A = image_embeds.repeat_interleave(A_ ,dim=0 ) A = negative_image_embeds.repeat_interleave(A_ ,dim=0 ) A = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=A_ ) if not isinstance(A_ ,A_ ): A = [image] if not all(isinstance(A_ ,(PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( F'Input is in incorrect format: {[type(A_ ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor' ) A = torch.cat([prepare_image(A_ ,A_ ,A_ ) for i in image] ,dim=0 ) A = image.to(dtype=image_embeds.dtype ,device=A_ ) A = self.movq.encode(A_ )['latents'] A = latents.repeat_interleave(A_ ,dim=0 ) self.scheduler.set_timesteps(A_ ,device=A_ ) A , A = self.get_timesteps(A_ ,A_ ,A_ ) A = timesteps[:1].repeat(batch_size * num_images_per_prompt ) A , A = downscale_height_and_width(A_ ,A_ ,self.movq_scale_factor ) A = self.prepare_latents( A_ ,A_ ,A_ ,A_ ,image_embeds.dtype ,A_ ,A_ ) for i, t in enumerate(self.progress_bar(A_ ) ): # expand the latents if we are doing classifier free guidance A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents A = {'image_embeds': image_embeds} A = self.unet( sample=A_ ,timestep=A_ ,encoder_hidden_states=A_ ,added_cond_kwargs=A_ ,return_dict=A_ ,)[0] if do_classifier_free_guidance: A , A = noise_pred.split(latents.shape[1] ,dim=1 ) A , A = noise_pred.chunk(2 ) A , A = variance_pred.chunk(2 ) A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) A = torch.cat([noise_pred, variance_pred_text] ,dim=1 ) if not ( hasattr(self.scheduler.config ,'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): A , A = noise_pred.split(latents.shape[1] ,dim=1 ) # compute the previous noisy sample x_t -> x_t-1 A = self.scheduler.step( A_ ,A_ ,A_ ,generator=A_ ,)[0] # post-processing A = self.movq.decode(A_ ,force_not_quantize=A_ )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' ) if output_type in ["np", "pil"]: A = image * 0.5 + 0.5 A = image.clamp(0 ,1 ) A = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy() if output_type == "pil": A = self.numpy_to_pil(A_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=A_ )
91
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
91
1
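`downscale_height_and_width`, defined near the top of the Kandinsky img2img sample at the start of this row, ceil-divides each pixel dimension by scale_factor**2 and scales back by scale_factor, yielding the latent dimensions rounded up to whole blocks. A behaviorally equivalent standalone copy (the ceil-division idiom is mine; the logic matches the source):

def downscale_height_and_width(height, width, scale_factor=8):
    new_height = -(-height // scale_factor**2)  # ceil division
    new_width = -(-width // scale_factor**2)
    return new_height * scale_factor, new_width * scale_factor

print(downscale_height_and_width(768, 768))  # (96, 96): 768 / 8 exactly
print(downscale_height_and_width(500, 500))  # (64, 64): 500 / 64 = 7.8 rounds up to 8 blocks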
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : List[str] ,A_ : List[Any]=sys.maxsize ) -> List[str]: A = 'bilinear' A = max_size A = short_edge_length def __call__( self : Dict ,A_ : Union[str, Any] ) -> int: A = [] for img in imgs: A , A = img.shape[:2] # later: provide list and randomly choose index for resize A = np.random.randint(self.short_edge_length[0] ,self.short_edge_length[1] + 1 ) if size == 0: return img A = size * 1.0 / min(A_ ,A_ ) if h < w: A , A = size, scale * w else: A , A = scale * h, size if max(A_ ,A_ ) > self.max_size: A = self.max_size * 1.0 / max(A_ ,A_ ) A = newh * scale A = neww * scale A = int(neww + 0.5 ) A = int(newh + 0.5 ) if img.dtype == np.uinta: A = Image.fromarray(A_ ) A = pil_image.resize((neww, newh) ,PILImageResampling.BILINEAR ) A = np.asarray(A_ ) else: A = img.permute(2 ,0 ,1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A = nn.functional.interpolate( A_ ,(newh, neww) ,mode=self.interp_method ,align_corners=A_ ).squeeze(0 ) img_augs.append(A_ ) return img_augs class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ,A_ : List[str] ) -> int: A = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] ,cfg.INPUT.MAX_SIZE_TEST ) A = cfg.INPUT.FORMAT A = cfg.SIZE_DIVISIBILITY A = cfg.PAD_VALUE A = cfg.INPUT.MAX_SIZE_TEST A = cfg.MODEL.DEVICE A = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 ) A = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) ,1 ,1 ) A = lambda A_ : (x - self.pixel_mean) / self.pixel_std def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ) -> List[Any]: A = tuple(max(A_ ) for s in zip(*[img.shape for img in images] ) ) A = [im.shape[-2:] for im in images] A = [ nn.functional.pad( A_ ,[0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,value=self.pad_value ,) for size, im in zip(A_ ,A_ ) ] return torch.stack(A_ ), torch.tensor(A_ ) def __call__( self : Any ,A_ : int ,A_ : List[Any]=False ) -> int: with torch.no_grad(): if not isinstance(A_ ,A_ ): A = [images] if single_image: assert len(A_ ) == 1 for i in range(len(A_ ) ): if isinstance(images[i] ,torch.Tensor ): images.insert(A_ ,images.pop(A_ ).to(self.device ).float() ) elif not isinstance(images[i] ,torch.Tensor ): images.insert( A_ ,torch.as_tensor(img_tensorize(images.pop(A_ ) ,input_format=self.input_format ) ) .to(self.device ) .float() ,) # resize smallest edge A = torch.tensor([im.shape[:2] for im in images] ) A = self.aug(A_ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A = [self.normalizer(A_ ) for x in images] # now pad them to do the following operations A , A = self.pad(A_ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A = torch.true_divide(A_ ,A_ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _snake_case ( snake_case__ : Tuple , snake_case__ : Tuple[int, int] ): assert torch.isfinite(snake_case__ 
).all(), "Box tensor contains infinite or NaN!" A , A = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
91
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, 
*report_metric_keys] A = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
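The benchmark driver above expands `--variations` into a cartesian product of `|`-separated dimensions before looping over runs, mirroring the `re.split`/`itertools.product` calls in `main()`. A minimal sketch of that expansion step, using made-up dimension strings rather than the script's actual CLI input:

import itertools
import re

# Two dimensions; an empty leading alternative means "flag absent".
dimensions = ["|--fp16|--bf16", "|--tf32"]

# Split each dimension on '|' into its alternatives, stripping whitespace.
dims = [list(map(str.strip, re.split(r"\|", d))) for d in dimensions]

# Cartesian product -> one command-line suffix per benchmark run.
variations = [" ".join(combo).strip() for combo in itertools.product(*dims)]
print(variations)
# ['', '--tf32', '--fp16', '--fp16 --tf32', '--bf16', '--bf16 --tf32']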
91
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import GLPNImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any ,A_ : Optional[int] ,A_ : str=7 ,A_ : Any=3 ,A_ : Any=18 ,A_ : Union[str, Any]=30 ,A_ : List[Any]=400 ,A_ : Union[str, Any]=True ,A_ : Dict=32 ,A_ : int=True ,) -> Union[str, Any]: A = parent A = batch_size A = num_channels A = image_size A = min_resolution A = max_resolution A = do_resize A = size_divisor A = do_rescale def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: return { "do_resize": self.do_resize, "size_divisor": self.size_divisor, "do_rescale": self.do_rescale, } @require_torch @require_vision class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[Any] = GLPNImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A = GLPNImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ ,'do_resize' ) ) self.assertTrue(hasattr(A_ ,'size_divisor' ) ) self.assertTrue(hasattr(A_ ,'resample' ) ) self.assertTrue(hasattr(A_ ,'do_rescale' ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: # Initialize image_processing A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ ,Image.Image ) # Test not batched input (GLPNImageProcessor doesn't support batching) A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: # Initialize image_processing A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ ,numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ ,np.ndarray ) # Test not batched input (GLPNImageProcessor doesn't support batching) A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: # Initialize image_processing A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ ,torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ ,torch.Tensor ) # Test not batched input (GLPNImageProcessor doesn't support batching) A = image_processing(image_inputs[0] 
,return_tensors='pt' ).pixel_values self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 ) self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
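The assertions above only check that both spatial dimensions come out divisible by `size_divisor`; one resizing rule consistent with that check (a hypothetical sketch, not necessarily the GLPN implementation) is rounding each side down to the nearest multiple:

def floor_to_multiple(height: int, width: int, size_divisor: int = 32) -> tuple[int, int]:
    # Largest multiples of size_divisor not exceeding the inputs.
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor

print(floor_to_multiple(400, 275))  # (384, 256), both divisible by 32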
91
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from 
{input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
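All four helpers above share one shape: enter the `FSDP.state_dict_type(...)` context, then branch on the plugin's `StateDictType`. A stripped-down sketch of that dispatch for the model save path (the file names and string constants here are illustrative, not the accelerate API):

import os
import torch

def save_model_state(state_dict, output_dir, rank, state_dict_type):
    if state_dict_type == "FULL_STATE_DICT":
        # One consolidated file, written by rank 0 only.
        if rank == 0:
            torch.save(state_dict, os.path.join(output_dir, "pytorch_model.bin"))
    elif state_dict_type == "LOCAL_STATE_DICT":
        # One shard file per rank.
        torch.save(state_dict, os.path.join(output_dir, f"pytorch_model_rank{rank}.bin"))
    else:  # SHARDED_STATE_DICT
        # A checkpoint directory written via torch.distributed.checkpoint,
        # as dist_cp.save_state_dict does in the real code path above.
        os.makedirs(os.path.join(output_dir, "sharded_model"), exist_ok=True)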
91
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : List[str] ,*A_ : Optional[int] ,**A_ : int ) -> None: warnings.warn( 'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use CLIPImageProcessor instead.' ,A_ ,) super().__init__(*A_ ,**A_ )
91
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, 
-0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 
,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 
0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
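The `audio_length_in_s` assertions above rely on the identity between waveform length, duration, and the vocoder's sampling rate. As a quick check of the arithmetic the tests depend on:

def expected_num_samples(audio_length_in_s: float, sampling_rate: int) -> int:
    # The tests assert len(audio) / sampling_rate == audio_length_in_s.
    return round(audio_length_in_s * sampling_rate)

assert expected_num_samples(0.016, 16_000) == 256  # matches the 256-sample dummy outputs
assert expected_num_samples(0.032, 16_000) == 512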
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
91
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowercase = logging.get_logger(__name__) _lowercase = { '''google/bit-50''': '''https://huggingface.co/google/bit-50/resolve/main/config.json''', } class lowerCAmelCase_ ( _lowercase , _lowercase ): '''simple docstring''' _lowerCamelCase: Any = '''bit''' _lowerCamelCase: List[Any] = ['''preactivation''', '''bottleneck'''] _lowerCamelCase: Tuple = ['''SAME''', '''VALID'''] def __init__( self : Union[str, Any] ,A_ : Tuple=3 ,A_ : List[Any]=64 ,A_ : Optional[int]=[256, 512, 1024, 2048] ,A_ : List[Any]=[3, 4, 6, 3] ,A_ : Optional[Any]="preactivation" ,A_ : Optional[int]="relu" ,A_ : Optional[Any]=None ,A_ : Any=32 ,A_ : Union[str, Any]=0.0 ,A_ : Optional[int]=False ,A_ : Tuple=32 ,A_ : Dict=1 ,A_ : str=None ,A_ : List[str]=None ,**A_ : Dict ,) -> Any: super().__init__(**A_ ) if layer_type not in self.layer_types: raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types )}' ) if global_padding is not None: if global_padding.upper() in self.supported_padding: A = global_padding.upper() else: raise ValueError(F'Padding strategy {global_padding} not supported' ) A = num_channels A = embedding_size A = hidden_sizes A = depths A = layer_type A = hidden_act A = global_padding A = num_groups A = drop_path_rate A = embedding_dynamic_padding A = output_stride A = width_factor A = ['stem'] + [F'stage{idx}' for idx in range(1 ,len(A_ ) + 1 )] A , A = get_aligned_output_features_output_indices( out_features=A_ ,out_indices=A_ ,stage_names=self.stage_names )
91
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def _snake_case ( ): A = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A = get_sagemaker_input() else: A = get_cluster_input() return config def _snake_case ( snake_case__ : Any=None ): if subparsers is not None: A = subparsers.add_parser('config' , description=snake_case__ ) else: A = argparse.ArgumentParser('Accelerate config command' , description=snake_case__ ) parser.add_argument( '--config_file' , default=snake_case__ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def _snake_case ( snake_case__ : Tuple ): A = get_user_input() if args.config_file is not None: A = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) A = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F'accelerate configuration saved at {config_file}' ) def _snake_case ( ): A = config_command_parser() A = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
91
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''camembert''' def __init__( self : Optional[int] ,A_ : Any=3_0522 ,A_ : Any=768 ,A_ : Tuple=12 ,A_ : Optional[int]=12 ,A_ : Union[str, Any]=3072 ,A_ : Dict="gelu" ,A_ : Optional[Any]=0.1 ,A_ : Union[str, Any]=0.1 ,A_ : List[Any]=512 ,A_ : Tuple=2 ,A_ : Optional[int]=0.02 ,A_ : Optional[int]=1e-12 ,A_ : int=1 ,A_ : Tuple=0 ,A_ : List[Any]=2 ,A_ : Optional[int]="absolute" ,A_ : Optional[Any]=True ,A_ : Union[str, Any]=None ,**A_ : Optional[int] ,) -> List[str]: super().__init__(pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = position_embedding_type A = use_cache A = classifier_dropout class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
91
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]: A = LlamaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]: A = True A = LlamaModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) 
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]: A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]: A = True A = True A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else () _lowerCamelCase: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = LlamaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 
).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ids_tensor([1, 10] ,config.vocab_size ) A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() A = original_model(A_ ).last_hidden_state A = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = {'type': scaling_type, 'factor': 10.0} A = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() A = scaled_model(A_ ).last_hidden_state A = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' ) A = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) A = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Model is currently gated' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' A = 'Simply put, the theory of relativity states that ' A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) A = tokenizer.encode(A_ ,return_tensors='pt' ) A = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ ) # greedy generation outputs A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ ) A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ )
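The `past_key_values` test earlier in this file is the standard KV-cache consistency check: decode the extended sequence from scratch, decode only the appended tokens against the cached states, and compare a random slice within tolerance. The comparison itself reduces to a tolerance check like this (the tensors are stand-ins for the two hidden-state slices):

import torch

def caches_agree(from_scratch, with_cache, atol=1e-3):
    # from_scratch: hidden states for the new tokens from a full forward pass.
    # with_cache:   hidden states for the same tokens computed with past_key_values.
    return torch.allclose(from_scratch, with_cache, atol=atol)

slice_a = torch.randn(2, 3, 8)
assert caches_agree(slice_a, slice_a + 1e-5)      # within tolerance
assert not caches_agree(slice_a, slice_a + 1.0)   # clearly divergent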
91
1
"""simple docstring""" from __future__ import annotations class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ,A_ : str ,A_ : str ) -> int: A , A = text, pattern A , A = len(A_ ), len(A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: for i in range(self.patLen - 1 ,-1 ,-1 ): if char == self.pattern[i]: return i return -1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ) -> int: for i in range(self.patLen - 1 ,-1 ,-1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> list[int]: # searches pattern in text and returns index positions A = [] for i in range(self.textLen - self.patLen + 1 ): A = self.mismatch_in_text(A_ ) if mismatch_index == -1: positions.append(A_ ) else: A = self.match_in_pattern(self.text[mismatch_index] ) A = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _lowercase = '''ABAABA''' _lowercase = '''AB''' _lowercase = BoyerMooreSearch(text, pattern) _lowercase = bms.bad_character_heuristic() if len(positions) == 0: print('''No match found''') else: print('''Pattern found in following positions: ''') print(positions)
91
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
1
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device if is_torch_available(): from transformers import AutoModelForSeqaSeqLM, AutoTokenizer @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' ,return_dict=A_ ).to(A_ ) A = AutoTokenizer.from_pretrained('google/mt5-small' ) A = tokenizer('Hello there' ,return_tensors='pt' ).input_ids A = tokenizer('Hi I am' ,return_tensors='pt' ).input_ids A = model(input_ids.to(A_ ) ,labels=labels.to(A_ ) ).loss A = -(labels.shape[-1] * loss.item()) A = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
91
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
1
"""simple docstring""" import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 _lowercase = { '''return_dict''': False, '''output_hidden_states''': True, '''output_attentions''': True, '''torchscript''': True, '''torch_dtype''': '''float16''', '''use_bfloat16''': True, '''tf_legacy_loss''': True, '''pruned_heads''': {'''a''': 1}, '''tie_word_embeddings''': False, '''is_decoder''': True, '''cross_attention_hidden_size''': 1_28, '''add_cross_attention''': True, '''tie_encoder_decoder''': True, '''max_length''': 50, '''min_length''': 3, '''do_sample''': True, '''early_stopping''': True, '''num_beams''': 3, '''num_beam_groups''': 3, '''diversity_penalty''': 0.5, '''temperature''': 2.0, '''top_k''': 10, '''top_p''': 0.7, '''typical_p''': 0.2, '''repetition_penalty''': 0.8, '''length_penalty''': 0.8, '''no_repeat_ngram_size''': 5, '''encoder_no_repeat_ngram_size''': 5, '''bad_words_ids''': [1, 2, 3], '''num_return_sequences''': 3, '''chunk_size_feed_forward''': 5, '''output_scores''': True, '''return_dict_in_generate''': True, '''forced_bos_token_id''': 2, '''forced_eos_token_id''': 3, '''remove_invalid_values''': True, '''architectures''': ['''BertModel'''], '''finetuning_task''': '''translation''', '''id2label''': {0: '''label'''}, '''label2id''': {'''label''': '''0'''}, '''tokenizer_class''': '''BertTokenizerFast''', '''prefix''': '''prefix''', '''bos_token_id''': 6, '''pad_token_id''': 7, '''eos_token_id''': 8, '''sep_token_id''': 9, '''decoder_start_token_id''': 10, '''exponential_decay_length_penalty''': (5, 1.01), '''suppress_tokens''': [0, 1], '''begin_suppress_tokens''': 2, '''task_specific_params''': {'''translation''': '''some_params'''}, '''problem_type''': '''regression''', } @is_staging_test class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ) -> str: A = TOKEN HfFolder.save_token(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : int ) -> Optional[int]: try: delete_repo(token=cls._token ,repo_id='test-config' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='valid_org/test-config-org' ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id='test-dynamic-config' ) except HTTPError: pass def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: A = BertConfig( vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ) config.push_to_hub('test-config' ,use_auth_token=self._token ) A = BertConfig.from_pretrained(F'{USER}/test-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(A_ ,getattr(A_ ,A_ ) ) # Reset repo delete_repo(token=self._token ,repo_id='test-config' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(A_ ,repo_id='test-config' ,push_to_hub=A_ ,use_auth_token=self._token ) A = BertConfig.from_pretrained(F'{USER}/test-config' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(A_ ,getattr(A_ ,A_ ) ) def 
_SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = BertConfig( vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ) config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token ) A = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(A_ ,getattr(A_ ,A_ ) ) # Reset repo delete_repo(token=self._token ,repo_id='valid_org/test-config-org' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( A_ ,repo_id='valid_org/test-config-org' ,push_to_hub=A_ ,use_auth_token=self._token ) A = BertConfig.from_pretrained('valid_org/test-config-org' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(A_ ,getattr(A_ ,A_ ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: CustomConfig.register_for_auto_class() A = CustomConfig(attribute=42 ) config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'} ) A = AutoConfig.from_pretrained(F'{USER}/test-dynamic-config' ,trust_remote_code=A_ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ ,'CustomConfig' ) self.assertEqual(new_config.attribute ,42 ) class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated A = c.n_embd + 1 # int A = c.resid_pdrop + 1.0 # float A = not c.scale_attn_weights # bool A = c.summary_type + 'foo' # str c.update_from_string( F'n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}' ) self.assertEqual(A_ ,c.n_embd ,'mismatch for key: n_embd' ) self.assertEqual(A_ ,c.resid_pdrop ,'mismatch for key: resid_pdrop' ) self.assertEqual(A_ ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights' ) self.assertEqual(A_ ,c.summary_type ,'mismatch for key: summary_type' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: A = PretrainedConfig() A = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. self.assertListEqual( A_ ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] ) A = [key for key, value in config_common_kwargs.items() if value == getattr(A_ ,A_ )] if len(A_ ) > 0: raise ValueError( 'The following keys are set with the default values in' ' `test_configuration_common.config_common_kwargs` pick another value for them:' F' {", ".join(A_ )}.' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: with self.assertRaises(A_ ): # config is in subfolder, the following should not work without specifying the subfolder A = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ) A = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert' ) self.assertIsNotNone(A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down A = mock.Mock() A = 500 A = {} A = HTTPError A = {} # Download this model to make sure it's in the cache. 
A = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' ,return_value=A_ ) as mock_head: A = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' ) # This check we did call the fake head request mock_head.assert_called() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: # This test is for deprecated behavior and can be removed in v5 A = BertConfig.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = AutoConfig.from_pretrained('bert-base-cased' ) A = ['config.4.0.0.json'] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(A_ ) A = 2 json.dump(configuration.to_dict() ,open(os.path.join(A_ ,'config.4.0.0.json' ) ,'w' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 A = AutoConfig.from_pretrained(A_ ) self.assertEqual(new_configuration.hidden_size ,2 ) # Will need to be adjusted if we reach v42 and this test is still here. # Should pick the old configuration file as the version of Transformers is < 4.42.0 A = ['config.42.0.0.json'] A = 768 configuration.save_pretrained(A_ ) shutil.move(os.path.join(A_ ,'config.4.0.0.json' ) ,os.path.join(A_ ,'config.42.0.0.json' ) ) A = AutoConfig.from_pretrained(A_ ) self.assertEqual(new_configuration.hidden_size ,768 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. A = 'hf-internal-testing/test-two-configs' import transformers as new_transformers A = 'v4.0.0' A , A = new_transformers.models.auto.AutoConfig.from_pretrained( A_ ,return_unused_kwargs=A_ ) self.assertEqual(new_configuration.hidden_size ,2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(A_ ,{} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers A = 'v3.0.0' A = old_transformers.models.auto.AutoConfig.from_pretrained(A_ ) self.assertEqual(old_configuration.hidden_size ,768 )
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, 
FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig _lowercase = { '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''', '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''', } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Union[str, Any] = '''ernie_m''' _lowerCamelCase: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self : Optional[Any] ,A_ : int = 25_0002 ,A_ : int = 768 ,A_ : int = 12 ,A_ : int = 12 ,A_ : int = 3072 ,A_ : str = "gelu" ,A_ : float = 0.1 ,A_ : float = 0.1 ,A_ : int = 514 ,A_ : float = 0.02 ,A_ : int = 1 ,A_ : float = 1e-05 ,A_ : Tuple=None ,A_ : str=False ,A_ : Tuple=0.0 ,**A_ : Optional[Any] ,) -> Dict: super().__init__(pad_token_id=A_ ,**A_ ) A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = initializer_range A = layer_norm_eps A = classifier_dropout A = is_decoder A = act_dropout
91
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': 
self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
91
1
"""simple docstring""" import argparse import re import requests import torch # git clone https://github.com/salesforce/BLIP.git from models.blip import blip_decoder from models.blip_itm import blip_itm from models.blip_vqa import blip_vqa from PIL import Image from torchvision import transforms from torchvision.transforms.functional import InterpolationMode from transformers import ( BertTokenizer, BlipConfig, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, ) def _snake_case ( snake_case__ : Tuple , snake_case__ : int ): A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg' A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' ) A = transforms.Compose( [ transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), transforms.Normalize((0.48145466, 0.4578275, 0.40821073) , (0.26862954, 0.26130258, 0.27577711) ), ] ) A = transform(snake_case__ ).unsqueeze(0 ).to(snake_case__ ) return image def _snake_case ( snake_case__ : Optional[Any] ): if "visual_encoder" in key: A = re.sub('visual_encoder*' , 'vision_model.encoder' , snake_case__ ) if "blocks" in key: A = re.sub(r'blocks' , 'layers' , snake_case__ ) if "attn" in key: A = re.sub(r'attn' , 'self_attn' , snake_case__ ) if "norm1" in key: A = re.sub(r'norm1' , 'layer_norm1' , snake_case__ ) if "norm2" in key: A = re.sub(r'norm2' , 'layer_norm2' , snake_case__ ) if "encoder.norm" in key: A = re.sub(r'encoder.norm' , 'post_layernorm' , snake_case__ ) if "encoder.patch_embed.proj" in key: A = re.sub(r'encoder.patch_embed.proj' , 'embeddings.patch_embedding' , snake_case__ ) if "encoder.pos_embed" in key: A = re.sub(r'encoder.pos_embed' , 'embeddings.position_embedding' , snake_case__ ) if "encoder.cls_token" in key: A = re.sub(r'encoder.cls_token' , 'embeddings.class_embedding' , snake_case__ ) if "self_attn" in key: A = re.sub(r'self_attn.proj' , 'self_attn.projection' , snake_case__ ) return key @torch.no_grad() def _snake_case ( snake_case__ : Any , snake_case__ : str=None ): if config_path is not None: A = BlipConfig.from_pretrained(snake_case__ ) else: A = BlipConfig(projection_dim=512 , text_config={} , vision_config={} ) A = BlipForConditionalGeneration(snake_case__ ).eval() A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth' A = blip_decoder(pretrained=snake_case__ , image_size=384 , vit='base' ) A = pt_model.eval() A = pt_model.state_dict() for key in modified_state_dict.copy(): A = modified_state_dict.pop(snake_case__ ) A = rename_key(snake_case__ ) A = value hf_model.load_state_dict(snake_case__ ) A = 384 A = load_demo_image(image_size=snake_case__ , device='cpu' ) A = BertTokenizer.from_pretrained('bert-base-uncased' ) A = tokenizer(['a picture of'] ).input_ids A = hf_model.generate(snake_case__ , snake_case__ ) assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] A = hf_model.generate(snake_case__ ) assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102] if pytorch_dump_folder_path is not None: hf_model.save_pretrained(snake_case__ ) # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth' A = ( 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth' ) A = blip_vqa(pretrained=snake_case__ , image_size=snake_case__ , vit='base' ) vqa_model.eval() A 
= vqa_model.state_dict() for key in modified_state_dict.copy(): A = modified_state_dict.pop(snake_case__ ) A = rename_key(snake_case__ ) A = value A = BlipForQuestionAnswering(snake_case__ ) hf_vqa_model.load_state_dict(snake_case__ ) A = ['How many dogs are in this image?'] A = tokenizer(snake_case__ , return_tensors='pt' ).input_ids A = hf_vqa_model.generate(snake_case__ , snake_case__ ) print(tokenizer.decode(answer[0] ) ) assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]" if pytorch_dump_folder_path is not None: hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' ) A = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth' A = blip_itm(pretrained=snake_case__ , image_size=snake_case__ , vit='base' ) itm_model.eval() A = itm_model.state_dict() for key in modified_state_dict.copy(): A = modified_state_dict.pop(snake_case__ ) A = rename_key(snake_case__ ) A = value A = BlipForImageTextRetrieval(snake_case__ ) A = ['A picture of a woman with a dog sitting in a beach'] A = tokenizer( snake_case__ , return_tensors='pt' , padding='max_length' , truncation=snake_case__ , max_length=35 , ).input_ids hf_itm_model.load_state_dict(snake_case__ ) hf_itm_model.eval() A = hf_itm_model(snake_case__ , snake_case__ , use_itm_head=snake_case__ ) A = hf_itm_model(snake_case__ , snake_case__ , use_itm_head=snake_case__ ) assert out[0].item() == 0.2110687494277954 assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45698845386505127 if pytorch_dump_folder_path is not None: hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') _lowercase = parser.parse_args() convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
91
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
1
"""simple docstring""" import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Dict ,A_ : Optional[Any]=3 ,A_ : Optional[int]=32 ,A_ : Dict=3 ,A_ : Tuple=10 ,A_ : Tuple=[8, 16, 32, 64] ,A_ : Optional[int]=[1, 1, 2, 1] ,A_ : int=True ,A_ : Dict=True ,A_ : Union[str, Any]="relu" ,A_ : Dict=3 ,A_ : Optional[int]=None ,A_ : int=["stage2", "stage3", "stage4"] ,A_ : Tuple=[2, 3, 4] ,A_ : List[str]=1 ,) -> str: A = parent A = batch_size A = image_size A = num_channels A = embeddings_size A = hidden_sizes A = depths A = is_training A = use_labels A = hidden_act A = num_labels A = scope A = len(A_ ) A = out_features A = out_indices A = num_groups def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.num_labels ) A = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return BitConfig( num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Any ,A_ : List[Any] ) -> Optional[Any]: A = BitModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ) self.parent.assertEqual( result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any] ,A_ : int ,A_ : Tuple ) -> Tuple: A = self.num_labels A = BitForImageClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : str ,A_ : Optional[Any] ) -> str: A = BitBackbone(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] ) # verify backbone works with out_features=None A = None A = BitBackbone(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, 
self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: A = self.prepare_config_and_inputs() A , A , A = config_and_inputs A = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () _lowerCamelCase: List[Any] = ( {'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification} if is_torch_available() else {} ) _lowerCamelCase: Union[str, Any] = False _lowerCamelCase: Optional[int] = False _lowerCamelCase: List[str] = False _lowerCamelCase: int = False _lowerCamelCase: Any = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = BitModelTester(self ) A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: return @unittest.skip(reason='Bit does not output attentions' ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: pass @unittest.skip(reason='Bit does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @unittest.skip(reason='Bit does not support input and output embeddings' ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: pass def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(config=A_ ) for name, module in model.named_modules(): if isinstance(A_ ,(nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,) self.assertTrue( torch.all(module.bias == 0 ) ,msg=F'Parameter {name} of model {model_class} seems not properly initialized' ,) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: def check_hidden_states_output(A_ : Any ,A_ : List[str] ,A_ : int ): A = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(A_ ,A_ ) ) A = outputs.encoder_hidden_states if 
config.is_encoder_decoder else outputs.hidden_states A = self.model_tester.num_stages self.assertEqual(len(A_ ) ,expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: A = layer_type A = True check_hidden_states_output(A_ ,A_ ,A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A = True check_hidden_states_output(A_ ,A_ ,A_ ) @unittest.skip(reason='Bit does not use feedforward chunking' ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = BitModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A_ ) A = self.default_image_processor A = prepare_img() A = image_processor(images=A_ ,return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): A = model(**A_ ) # verify the logits A = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,A_ ) A = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) ) @require_torch class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = (BitBackbone,) if is_torch_available() else () _lowerCamelCase: List[str] = BitConfig _lowerCamelCase: Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: A = BitModelTester(self )
91
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ,A_ : List[Any]=7 ,A_ : Dict=3 ,A_ : Any=18 ,A_ : List[Any]=30 ,A_ : List[Any]=400 ,A_ : List[Any]=True ,A_ : Optional[int]=None ,A_ : int=True ,A_ : Any=None ,A_ : Optional[Any]=True ,A_ : Union[str, Any]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,A_ : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,A_ : Union[str, Any]=True ,) -> int: A = size if size is not None else {'height': 224, 'width': 224} A = crop_size if crop_size is not None else {'height': 18, 'width': 18} A = parent A = batch_size A = num_channels A = image_size A = min_resolution A = max_resolution A = do_resize A = size A = do_center_crop A = crop_size A = do_normalize A = image_mean A = image_std A = do_convert_rgb def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_convert_rgb": self.do_convert_rgb, } def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=False ,A_ : List[str]=False ,A_ : Optional[Any]=False ) -> List[Any]: assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time" if equal_resolution: A = [] for i in range(self.batch_size ): image_inputs.append( np.random.randint( 255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) ) else: A = [] for i in range(self.batch_size ): A , A = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 ) image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) ) if not numpify and not torchify: # PIL expects the channel dimension as last dimension A = [Image.fromarray(np.moveaxis(A_ ,0 ,-1 ) ) for x in image_inputs] if torchify: A = [torch.from_numpy(A_ ) for x in image_inputs] return image_inputs @require_torch @require_vision class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Tuple = ChineseCLIPImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: A = ChineseCLIPImageProcessingTester(self ,do_center_crop=A_ ) @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ ,'do_resize' ) ) self.assertTrue(hasattr(A_ ,'size' ) ) self.assertTrue(hasattr(A_ ,'do_center_crop' ) ) self.assertTrue(hasattr(A_ ,'center_crop' ) ) self.assertTrue(hasattr(A_ ,'do_normalize' ) ) self.assertTrue(hasattr(A_ ,'image_mean' ) ) self.assertTrue(hasattr(A_ ,'image_std' ) ) self.assertTrue(hasattr(A_ ,'do_convert_rgb' ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: A = self.image_processing_class.from_dict(self.image_processor_dict ) 
self.assertEqual(image_processor.size ,{'height': 224, 'width': 224} ) self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} ) A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: pass def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: # Initialize image_processing A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ ,Image.Image ) # Test not batched input A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched A = image_processing(A_ ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: # Initialize image_processing A = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ,numpify=A_ ) for image in image_inputs: self.assertIsInstance(A_ ,np.ndarray ) # Test not batched input A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched A = image_processing(A_ ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: # Initialize image_processing A = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ,torchify=A_ ) for image in image_inputs: self.assertIsInstance(A_ ,torch.Tensor ) # Test not batched input A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched A = image_processing(A_ ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) @require_torch @require_vision class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = ChineseCLIPImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=A_ ) A = 3 @property def 
_SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: return self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A_ ,'do_resize' ) ) self.assertTrue(hasattr(A_ ,'size' ) ) self.assertTrue(hasattr(A_ ,'do_center_crop' ) ) self.assertTrue(hasattr(A_ ,'center_crop' ) ) self.assertTrue(hasattr(A_ ,'do_normalize' ) ) self.assertTrue(hasattr(A_ ,'image_mean' ) ) self.assertTrue(hasattr(A_ ,'image_std' ) ) self.assertTrue(hasattr(A_ ,'do_convert_rgb' ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: # Initialize image_processing A = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ) for image in image_inputs: self.assertIsInstance(A_ ,Image.Image ) # Test not batched input A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched A = image_processing(A_ ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.expected_encoded_image_num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,)
91
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) 
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of 
onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
91
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig _lowercase = { '''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''', '''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''', '''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''', '''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''', '''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''', '''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''', '''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''', '''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''', } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Dict = '''albert''' def __init__( self : str ,A_ : str=3_0000 ,A_ : Optional[Any]=128 ,A_ : Union[str, Any]=4096 ,A_ : int=12 ,A_ : Any=1 ,A_ : List[Any]=64 ,A_ : str=1_6384 ,A_ : int=1 ,A_ : Any="gelu_new" ,A_ : Any=0 ,A_ : int=0 ,A_ : int=512 ,A_ : Tuple=2 ,A_ : Any=0.02 ,A_ : List[str]=1e-12 ,A_ : Any=0.1 ,A_ : List[Any]="absolute" ,A_ : Union[str, Any]=0 ,A_ : Tuple=2 ,A_ : Dict=3 ,**A_ : int ,) -> Tuple: super().__init__(pad_token_id=A_ ,bos_token_id=A_ ,eos_token_id=A_ ,**A_ ) A = vocab_size A = embedding_size A = hidden_size A = num_hidden_layers A = num_hidden_groups A = num_attention_heads A = inner_group_num A = hidden_act A = intermediate_size A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = initializer_range A = layer_norm_eps A = classifier_dropout_prob A = position_embedding_type class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
91
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: int _lowerCamelCase: int class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : int ) -> Union[str, Any]: A = [[] for _ in range(A_ )] A = size def __getitem__( self : str ,A_ : int ) -> Iterator[Edge]: return iter(self._graph[vertex] ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: return self._size def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ,A_ : int ,A_ : int ) -> Tuple: if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.' ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).' ) self._graph[from_vertex].append(Edge(A_ ,A_ ) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ,A_ : int ) -> int | None: A = deque([start_vertex] ) A = [None] * self.size A = 0 while queue: A = queue.popleft() A = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: A = current_distance + edge.weight A = distances[edge.destination_vertex] if ( isinstance(A_ ,A_ ) and new_distance >= dest_vertex_distance ): continue A = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.' ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
91
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
91
1
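The first file in the row above implements 0-1 BFS: because edge weights are restricted to {0, 1}, a double-ended queue can replace Dijkstra's priority queue by pushing 0-weight neighbours to the front and 1-weight neighbours to the back. A minimal standalone sketch of the same technique (the adjacency-list representation and names here are illustrative, not taken from the file):

from collections import deque

def zero_one_bfs(adj: list[list[tuple[int, int]]], start: int) -> list[float]:
    # adj[u] holds (neighbour, weight) pairs with weight in {0, 1}
    dist = [float("inf")] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                if w == 0:
                    dq.appendleft(v)  # same distance level: process first
                else:
                    dq.append(v)      # one level further: process later
    return dist

# 0 -1-> 1 -0-> 2, plus a direct 0 -1-> 2 edge
print(zero_one_bfs([[(1, 1), (2, 1)], [(2, 0)], []], 0))  # [0, 1, 1]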
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_albert''': ['''ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''AlbertConfig''', '''AlbertOnnxConfig'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''AlbertTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''AlbertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''AlbertForMaskedLM''', '''AlbertForMultipleChoice''', '''AlbertForPreTraining''', '''AlbertForQuestionAnswering''', '''AlbertForSequenceClassification''', '''AlbertForTokenClassification''', '''AlbertModel''', '''AlbertPreTrainedModel''', '''load_tf_weights_in_albert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFAlbertForMaskedLM''', '''TFAlbertForMultipleChoice''', '''TFAlbertForPreTraining''', '''TFAlbertForQuestionAnswering''', '''TFAlbertForSequenceClassification''', '''TFAlbertForTokenClassification''', '''TFAlbertMainLayer''', '''TFAlbertModel''', '''TFAlbertPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxAlbertForMaskedLM''', '''FlaxAlbertForMultipleChoice''', '''FlaxAlbertForPreTraining''', '''FlaxAlbertForQuestionAnswering''', '''FlaxAlbertForSequenceClassification''', '''FlaxAlbertForTokenClassification''', '''FlaxAlbertModel''', '''FlaxAlbertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, 
FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
91
1
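The code field of the row above is a transformers-style lazy package __init__: public names are declared in _import_structure and the heavy submodules are only imported when first touched. A rough sketch of the underlying mechanism using a module-level __getattr__ (PEP 562), meant to live in a package's __init__.py; this illustrates the idea and is not the actual _LazyModule implementation:

import importlib

# Public name -> defining submodule (illustrative entries, not the full ALBERT map).
_import_structure = {
    "AlbertConfig": "configuration_albert",
    "AlbertModel": "modeling_albert",
}

def __getattr__(name: str):
    # Invoked only for names missing from the module namespace (PEP 562),
    # so the submodule import is deferred until first attribute access.
    if name in _import_structure:
        submodule = importlib.import_module("." + _import_structure[name], __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")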
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, 
-0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 
,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 
0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
91
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
91
1
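The comment table in the XLM-RoBERTa tokenizer above documents the fairseq/SentencePiece alignment: four special tokens are pinned to ids 0-3, every SentencePiece id is shifted by a fairseq offset of 1, and <mask> is appended after the shifted vocabulary. A small sketch of that id arithmetic (toy vocabulary size; spm_piece_to_id stands in for the real SentencePiece lookup):

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1   # spm already reserves ids 0-2 for specials, so one extra slot suffices
sp_vocab_size = 10   # toy size; the real model has ~250k pieces

def token_to_id(token: str, spm_piece_to_id) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id(token)
    # SentencePiece returns 0 for unknown pieces; that must map to <unk>,
    # not to the shifted id 1 (<pad>), hence the truthiness check.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

mask_token_id = sp_vocab_size + fairseq_offset  # <mask> sits one past the shifted vocab

print(token_to_id("<pad>", lambda t: 0))  # 1 (pinned special)
print(token_to_id("xyz", lambda t: 0))    # 3 (unknown piece -> <unk>)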
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = RobertaTokenizer _lowerCamelCase: int = RobertaTokenizerFast _lowerCamelCase: Tuple = True _lowerCamelCase: Optional[int] = {'''cls_token''': '''<s>'''} def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] A = {'unk_token': '<unk>'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,**A_ : Dict ) -> List[str]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : Optional[int] ) -> Any: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Optional[int] ) -> str: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: A = self.tokenizer_class(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'lower newer' A = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] A = tokenizer.tokenize(A_ ) # , add_prefix_space=True) self.assertListEqual(A_ ,A_ ) A = tokens + [tokenizer.unk_token] A = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' ,add_special_tokens=A_ ) ,[0, 3_1414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! cécé herlolip 418' ,add_special_tokens=A_ ) ,[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] ,) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = self.tokenizer_class.from_pretrained('roberta-base' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.encode( 'sequence builders' ,add_special_tokens=A_ ,add_prefix_space=A_ ) A = tokenizer.encode( 'sequence builders' ,'multi-sequence build' ,add_special_tokens=A_ ,add_prefix_space=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: A = self.get_tokenizer() A = 'Encode this sequence.' 
A = tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments A = tokenizer.encode(A_ ,add_special_tokens=A_ ,add_prefix_space=A_ ) A = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A_ ,A_ ) A = tokenizer.encode(A_ ,add_special_tokens=A_ ,add_prefix_space=A_ ) A = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A_ ,A_ ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) A = tokenizer.encode(A_ ,add_special_tokens=A_ ) A = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A_ ,A_ ) # Testing spaces after special tokens A = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(A_ ,lstrip=A_ ,rstrip=A_ )} ) # mask token has a left space A = tokenizer.convert_tokens_to_ids(A_ ) A = 'Encode <mask> sequence' A = 'Encode <mask>sequence' A = tokenizer.encode(A_ ) A = encoded.index(A_ ) A = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A_ ,A_ ) A = tokenizer.encode(A_ ) A = encoded.index(A_ ) A = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: pass def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ ) A = self.tokenizer_class.from_pretrained(A_ ,**A_ ) A = 'A, <mask> AllenNLP sentence.' A = tokenizer_r.encode_plus(A_ ,add_special_tokens=A_ ,return_token_type_ids=A_ ) A = tokenizer_p.encode_plus(A_ ,add_special_tokens=A_ ,return_token_type_ids=A_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) ,sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) ,sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) ,) A = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) A = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while Python doesn't self.assertSequenceEqual(tokens_p['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] ,[0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] ) self.assertSequenceEqual( A_ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( A_ ,['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: for trim_offsets, add_prefix_space in itertools.product([True, False] ,repeat=2 ): A = self.rust_tokenizer_class.from_pretrained( self.tmpdirname ,use_fast=A_ ,add_prefix_space=A_ ,trim_offsets=A_ ) A = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) A = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] ,A_ ) self.assertEqual(post_processor_state['add_prefix_space'] ,A_ ) self.assertEqual(post_processor_state['trim_offsets'] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A
= 'hello' # `hello` is a token in the vocabulary of `pretrained_name` A = F'{text_of_1_token} {text_of_1_token}' A = self.rust_tokenizer_class.from_pretrained( A_ ,use_fast=A_ ,add_prefix_space=A_ ,trim_offsets=A_ ) A = tokenizer_r(A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(A_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) ,) A = self.rust_tokenizer_class.from_pretrained( A_ ,use_fast=A_ ,add_prefix_space=A_ ,trim_offsets=A_ ) A = tokenizer_r(A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(A_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(A_ ) + 1, len(A_ ) + 1 + len(A_ )) ,) A = self.rust_tokenizer_class.from_pretrained( A_ ,use_fast=A_ ,add_prefix_space=A_ ,trim_offsets=A_ ) A = tokenizer_r(A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(A_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(A_ ), len(A_ ) + 1 + len(A_ )) ,) A = self.rust_tokenizer_class.from_pretrained( A_ ,use_fast=A_ ,add_prefix_space=A_ ,trim_offsets=A_ ) A = tokenizer_r(A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, len(A_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(len(A_ ), len(A_ ) + 1 + len(A_ )) ,) A = F' {text}' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) A = self.rust_tokenizer_class.from_pretrained( A_ ,use_fast=A_ ,add_prefix_space=A_ ,trim_offsets=A_ ) A = tokenizer_r(A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ) self.assertEqual(encoding.offset_mapping[0] ,(1, 1 + len(A_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(A_ ) + 1, 1 + len(A_ ) + 1 + len(A_ )) ,) A = self.rust_tokenizer_class.from_pretrained( A_ ,use_fast=A_ ,add_prefix_space=A_ ,trim_offsets=A_ ) A = tokenizer_r(A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(A_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) ,) A = self.rust_tokenizer_class.from_pretrained( A_ ,use_fast=A_ ,add_prefix_space=A_ ,trim_offsets=A_ ) A = tokenizer_r(A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ) self.assertEqual(encoding.offset_mapping[0] ,(0, 1 + len(A_ )) ) self.assertEqual( encoding.offset_mapping[1] ,(1 + len(A_ ), 1 + len(A_ ) + 1 + len(A_ )) ,)
91
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
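The RoBERTa tokenizer tests in the row above rely on the GPT-2 byte-level alphabet, in which a leading space is carried by the printable alias '\u0120' ('Ġ', as in 'Ġlowest'). The byte-to-unicode table remaps every non-printable byte to a code point above 255; the low bytes 0-32 are the first ones remapped, so each lands at 256 plus its own value, which can be checked directly:

# The space byte (32) is remapped to chr(256 + 32) == '\u0120' == 'Ġ'.
space_byte = " ".encode("utf-8")[0]   # 32
assert chr(space_byte + 256) == "\u0120"
print(chr(space_byte + 256))          # Ġ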
"""simple docstring""" # coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys _lowercase = '''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
91
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
91
1
"""simple docstring""" from __future__ import annotations import math class lowerCAmelCase_ : '''simple docstring''' def __init__( self : str ,A_ : int ) -> None: A = size # approximate the overall size of segment tree with given value A = [0 for i in range(0 ,4 * size )] # create array to store lazy update A = [0 for i in range(0 ,4 * size )] A = [0 for i in range(0 ,4 * size )] # flag for lazy update def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> int: return idx * 2 def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ) -> int: return idx * 2 + 1 def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ,A_ : int ,A_ : int ,A_ : list[int] ) -> None: if left_element == right_element: A = a[left_element - 1] else: A = (left_element + right_element) // 2 self.build(self.left(A_ ) ,A_ ,A_ ,A_ ) self.build(self.right(A_ ) ,mid + 1 ,A_ ,A_ ) A = max( self.segment_tree[self.left(A_ )] ,self.segment_tree[self.right(A_ )] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : int ,A_ : int ,A_ : int ,A_ : int ,A_ : int ,A_ : int ) -> bool: if self.flag[idx] is True: A = self.lazy[idx] A = False if left_element != right_element: A = self.lazy[idx] A = self.lazy[idx] A = True A = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: A = val if left_element != right_element: A = val A = val A = True A = True return True A = (left_element + right_element) // 2 self.update(self.left(A_ ) ,A_ ,A_ ,A_ ,A_ ,A_ ) self.update(self.right(A_ ) ,mid + 1 ,A_ ,A_ ,A_ ,A_ ) A = max( self.segment_tree[self.left(A_ )] ,self.segment_tree[self.right(A_ )] ) return True def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ,A_ : int ,A_ : int ,A_ : int ,A_ : int ) -> int | float: if self.flag[idx] is True: A = self.lazy[idx] A = False if left_element != right_element: A = self.lazy[idx] A = self.lazy[idx] A = True A = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] A = (left_element + right_element) // 2 A = self.query(self.left(A_ ) ,A_ ,A_ ,A_ ,A_ ) A = self.query(self.right(A_ ) ,mid + 1 ,A_ ,A_ ,A_ ) return max(A_ ,A_ ) def __str__( self : Union[str, Any] ) -> str: return str([self.query(1 ,1 ,self.size ,A_ ,A_ ) for i in range(1 ,self.size + 1 )] ) if __name__ == "__main__": _lowercase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] _lowercase = 15 _lowercase = SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 1_11) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 2_35) print(segt)
91
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
91
1
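The first file in the row above is a segment tree with lazy propagation supporting range assignment and range maximum. A compact standalone sketch of the same structure, written here for illustration (1-indexed positions, with None as the "no pending assignment" marker instead of the file's separate flag array):

import math

class LazySegTree:
    """Range assign / range max with lazy propagation (minimal sketch)."""

    def __init__(self, a: list[int]) -> None:
        self.n = len(a)
        self.tree = [-math.inf] * (4 * self.n)
        self.lazy = [None] * (4 * self.n)   # pending assignment, or None
        self._build(1, 1, self.n, a)

    def _build(self, idx, lo, hi, a):
        if lo == hi:
            self.tree[idx] = a[lo - 1]
            return
        mid = (lo + hi) // 2
        self._build(2 * idx, lo, mid, a)
        self._build(2 * idx + 1, mid + 1, hi, a)
        self.tree[idx] = max(self.tree[2 * idx], self.tree[2 * idx + 1])

    def _push(self, idx, is_leaf):
        if self.lazy[idx] is not None:
            self.tree[idx] = self.lazy[idx]
            if not is_leaf:                 # defer the assignment to the children
                self.lazy[2 * idx] = self.lazy[2 * idx + 1] = self.lazy[idx]
            self.lazy[idx] = None

    def update(self, idx, lo, hi, a, b, val):
        self._push(idx, lo == hi)
        if hi < a or b < lo:
            return
        if a <= lo and hi <= b:
            self.lazy[idx] = val
            self._push(idx, lo == hi)
            return
        mid = (lo + hi) // 2
        self.update(2 * idx, lo, mid, a, b, val)
        self.update(2 * idx + 1, mid + 1, hi, a, b, val)
        self.tree[idx] = max(self.tree[2 * idx], self.tree[2 * idx + 1])

    def query(self, idx, lo, hi, a, b):
        self._push(idx, lo == hi)
        if hi < a or b < lo:
            return -math.inf
        if a <= lo and hi <= b:
            return self.tree[idx]
        mid = (lo + hi) // 2
        return max(self.query(2 * idx, lo, mid, a, b),
                   self.query(2 * idx + 1, mid + 1, hi, a, b))

st = LazySegTree([1, 2, -4, 7, 3])
print(st.query(1, 1, st.n, 2, 4))    # 7
st.update(1, 1, st.n, 1, 3, 111)     # assign 111 on positions [1, 3]
print(st.query(1, 1, st.n, 1, 5))    # 111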
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowercase = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''') @total_ordering @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None _lowerCamelCase: Optional[Union[str, int]] = None def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A , A , A = _str_to_version_tuple(self.version_str ) def __repr__( self : Optional[Any] ) -> Tuple: return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: return self.major, self.minor, self.patch def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[str] ) -> Any: if isinstance(A_ ,A_ ): return Version(A_ ) elif isinstance(A_ ,A_ ): return other raise TypeError(F'{other} (type {type(A_ )}) cannot be compared to version.' ) def __eq__( self : Union[str, Any] ,A_ : Optional[int] ) -> List[Any]: try: A = self._validate_operand(A_ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : Optional[int] ,A_ : Optional[int] ) -> Any: A = self._validate_operand(A_ ) return self.tuple < other.tuple def __hash__( self : Union[str, Any] ) -> List[str]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ,A_ : str ) -> Optional[int]: A = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self.version_str def _snake_case ( snake_case__ : Optional[int] ): A = _VERSION_REG.match(snake_case__ ) if not res: raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(snake_case__ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] ) def _snake_case ( snake_case__ : Optional[Any] ): return ".".join(str(snake_case__ ) for v in version_tuple )
91
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
91
1
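The Version dataclass in the row above parses version strings with a named-group regex and then compares plain integer tuples. A minimal sketch of that parse step (same regex shape; error message abbreviated):

import re

_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def parse_version(version_str: str) -> tuple[int, int, int]:
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Expected x.y.z with digits.")
    return tuple(int(res.group(k)) for k in ("major", "minor", "patch"))

print(parse_version("2.13.1"))   # (2, 13, 1)
# Tuple comparison then gives the ordering: (2, 13, 1) < (2, 14, 0) is True.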
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Any ,A_ : Any ) -> Optional[Any]: A = val A = None A = None def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Union[str, Any]: if self.val: if val < self.val: if self.left is None: A = Node(A_ ) else: self.left.insert(A_ ) elif val > self.val: if self.right is None: A = Node(A_ ) else: self.right.insert(A_ ) else: A = val def _snake_case ( snake_case__ : str , snake_case__ : List[Any] ): # Recursive traversal if root: inorder(root.left , snake_case__ ) res.append(root.val ) inorder(root.right , snake_case__ ) def _snake_case ( snake_case__ : Tuple ): # Build BST if len(snake_case__ ) == 0: return arr A = Node(arr[0] ) for i in range(1 , len(snake_case__ ) ): root.insert(arr[i] ) # Traverse BST in order. A = [] inorder(snake_case__ , snake_case__ ) return res if __name__ == "__main__": print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
91
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
91
1
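Tree sort, as in the row above, is BST insertion followed by in-order traversal. Note two properties of that file: Node.insert overwrites equal keys, so duplicates collapse, and already-sorted input degenerates the tree into a linked list, turning the sort into O(n^2). A self-contained sketch of the same idea (list-based nodes, illustrative only):

def tree_sort(arr):
    def insert(node, v):
        if node is None:
            return [v, None, None]            # [value, left, right]
        if v < node[0]:
            node[1] = insert(node[1], v)
        elif v > node[0]:
            node[2] = insert(node[2], v)      # equal keys are dropped, as above
        return node

    def inorder(node, out):
        if node is not None:
            inorder(node[1], out)
            out.append(node[0])
            inorder(node[2], out)
        return out

    root = None
    for v in arr:
        root = insert(root, v)
    return inorder(root, [])

print(tree_sort([10, 1, 3, 2, 9, 14, 13]))    # [1, 2, 3, 9, 10, 13, 14]
print(tree_sort([3, 1, 3, 2]))                # [1, 2, 3] -- duplicate 3 collapsed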
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deformable_detr import DeformableDetrImageProcessor _lowercase = logging.get_logger(__name__) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,*A_ : str ,**A_ : List[str] ) -> None: warnings.warn( 'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use DeformableDetrImageProcessor instead.' ,A_ ,) super().__init__(*A_ ,**A_ )
91
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the baseline to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub(r'--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub(r'--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress.
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} , ) A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ ) if verbose: print('STDOUT' , result.stdout ) print('STDERR' , result.stderr ) # save the streams A = variation.replace(' ' , '-' ) with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f: f.write(result.stdout ) with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print('failed' ) return {target_metric_key: nan} with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f: A = json.load(snake_case__ ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ): A = [] A = [] A = F'{id}: {variation:<{longest_variation_len}}' A = F'{preamble}: ' A = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ): A = process_run_single( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) A = single_run_metrics[target_metric_key] if not math.isnan(snake_case__ ): metrics.append(snake_case__ ) results.append(snake_case__ ) outcome += "✓" else: outcome += "✘" A = F'\33[2K\r{outcome}' if len(snake_case__ ) > 0: A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} A = round(mean_metrics[target_metric_key] , 2 ) A = F'{outcome} {mean_target}' if len(snake_case__ ) > 1: results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}' print(snake_case__ ) A = variation return mean_metrics else: print(snake_case__ ) return {variation_key: variation, target_metric_key: nan} def _snake_case ( ): A = torch.cuda.get_device_properties(torch.device('cuda' ) ) return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n' def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): A = pd.DataFrame(snake_case__ ) A = 'variation' A = 'diff_%' A = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan A = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(snake_case__ ): # as a fallback, use the minimal value as the sentinel A = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(snake_case__ ): A = df.apply( lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis='columns' , ) # re-order columns A = [variation_key, target_metric_key, diff_key, 
*report_metric_keys] A = df.reindex(snake_case__ , axis='columns' ) # reorder cols # capitalize A = df.rename(str.capitalize , axis='columns' ) # make the cols as narrow as possible A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' ) A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' ) A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum'] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )] print('\n\n'.join(snake_case__ ) ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument( '--base-cmd' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Base cmd' , ) parser.add_argument( '--variations' , default=snake_case__ , type=snake_case__ , nargs='+' , required=snake_case__ , help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' , ) parser.add_argument( '--base-variation' , default=snake_case__ , type=snake_case__ , help='Baseline variation to compare to. if None the minimal target value will be used to compare against' , ) parser.add_argument( '--target-metric-key' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' , ) parser.add_argument( '--report-metric-keys' , default='' , type=snake_case__ , help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., \'train_loss train_samples' , ) parser.add_argument( '--repeat-times' , default=1 , type=snake_case__ , help='How many times to re-run each variation - an average will be reported' , ) parser.add_argument( '--output_dir' , default='output_benchmark' , type=snake_case__ , help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' , ) parser.add_argument( '--verbose' , default=snake_case__ , action='store_true' , help='Whether to show the outputs of each run or just the benchmark progress' , ) A = parser.parse_args() A = args.output_dir Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) A = get_base_command(snake_case__ , snake_case__ ) # split each dimension into its --foo variations A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) ) A = max(len(snake_case__ ) for x in variations ) # split wanted keys A = args.report_metric_keys.split() # capture prints into a log file for convenience A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt' print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' ) print(F'and this script\'s output is also piped into {report_fn}' ) A = Tee(snake_case__ ) print(F'\n*** Running {len(snake_case__ )} benchmarks:' ) print(F'Base command: {" ".join(snake_case__ )}' ) A = 'variation' A = [] for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ): A = base_cmd + variation.split() results.append( process_run( id + 1 , snake_case__ , snake_case__ , snake_case__ , snake_case__ , args.target_metric_key , snake_case__ , args.repeat_times , snake_case__ , args.verbose , ) ) process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ ) if __name__ == "__main__": main()
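# A hedged usage sketch for the benchmark driver defined above. The script file
# name (`benchmark.py`) and the example base command are assumptions made for
# illustration; the flags themselves (--base-cmd, --variations,
# --target-metric-key, --repeat-times) come straight from the argument parser.
#
#   python benchmark.py \
#       --base-cmd 'examples/pytorch/translation/run_translation.py --do_train ...' \
#       --variations '|--fp16|--bf16' '|--tf32' \
#       --target-metric-key train_samples_per_second \
#       --repeat-times 2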
"""simple docstring""" from math import pow def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : int , ): if current_sum == needed_sum: # If the sum of the powers is equal to needed_sum, then we have a solution. solutions_count += 1 return current_sum, solutions_count A = int(pow(snake_case__ , snake_case__ ) ) if current_sum + i_to_n <= needed_sum: # If the sum of the powers is less than needed_sum, then continue adding powers. current_sum += i_to_n A , A = backtrack( snake_case__ , snake_case__ , current_number + 1 , snake_case__ , snake_case__ ) current_sum -= i_to_n if i_to_n < needed_sum: # If the power of i is less than needed_sum, then try with the next power. A , A = backtrack( snake_case__ , snake_case__ , current_number + 1 , snake_case__ , snake_case__ ) return current_sum, solutions_count def _snake_case ( snake_case__ : int , snake_case__ : int ): if not (1 <= needed_sum <= 1000 and 2 <= power <= 10): raise ValueError( 'Invalid input\n' 'needed_sum must be between 1 and 1000, power between 2 and 10.' ) return backtrack(snake_case__ , snake_case__ , 1 , 0 , 0 )[1] # Return the solutions_count if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from 
{input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
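# A hedged usage sketch: the helpers above are normally reached through
# Accelerate's public checkpointing API rather than called directly. Under an
# FSDP-enabled `accelerate launch`, save_state/load_state dispatch into the
# model and optimizer save/load paths defined in this file. Left commented
# because it needs a distributed launch; the tiny model is a stand-in.
#
# import torch
# from accelerate import Accelerator
#
# accelerator = Accelerator()  # FSDP plugin picked up from the accelerate config
# model = accelerator.prepare(torch.nn.Linear(8, 8))
# accelerator.save_state("ckpt")
# accelerator.load_state("ckpt")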
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = BarthezTokenizer _lowerCamelCase: Union[str, Any] = BarthezTokenizerFast _lowerCamelCase: Tuple = True _lowerCamelCase: List[Any] = True def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: super().setUp() A = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ,legacy_format=A_ ) A = tokenizer def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = '<pad>' A = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) ,A_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'<s>' ) self.assertEqual(vocab_keys[1] ,'<pad>' ) self.assertEqual(vocab_keys[-1] ,'<mask>' ) self.assertEqual(len(A_ ) ,10_1122 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: self.assertEqual(self.get_tokenizer().vocab_size ,10_1122 ) @require_torch def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] A = [0, 57, 3018, 7_0307, 91, 2] A = self.tokenizer( A_ ,max_length=len(A_ ) ,padding=A_ ,truncation=A_ ,return_tensors='pt' ) self.assertIsInstance(A_ ,A_ ) self.assertEqual((2, 6) ,batch.input_ids.shape ) self.assertEqual((2, 6) ,batch.attention_mask.shape ) A = batch.input_ids.tolist()[0] self.assertListEqual(A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str: if not self.test_rust_tokenizer: return A = self.get_tokenizer() A = self.get_rust_tokenizer() A = 'I was born in 92000, and this is falsé.' 
A = tokenizer.tokenize(A_ ) A = rust_tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokenizer.encode(A_ ,add_special_tokens=A_ ) A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ ) self.assertListEqual(A_ ,A_ ) A = self.get_rust_tokenizer() A = tokenizer.encode(A_ ) A = rust_tokenizer.encode(A_ ) self.assertListEqual(A_ ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: # fmt: off A = {'input_ids': [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. A = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=A_ ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=A_ ,)
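# A hedged usage sketch mirroring the batch test above. The checkpoint name is
# taken from this file; the calls are left commented because they download the
# SentencePiece model.
#
# from transformers import BarthezTokenizer
# tok = BarthezTokenizer.from_pretrained("moussaKam/mbarthez")
# batch = tok(["A long paragraph for summarization."], return_tensors="pt")
# assert batch["input_ids"][0, 0].item() == 0   # <s>
# assert batch["input_ids"][0, -1].item() == 2  # </s>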
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: str = AudioLDMPipeline _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_PARAMS _lowerCamelCase: Optional[int] = TEXT_TO_AUDIO_BATCH_PARAMS _lowerCamelCase: Optional[int] = frozenset( [ '''num_inference_steps''', '''num_waveforms_per_prompt''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: torch.manual_seed(0 ) A = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=(32, 64) ,class_embed_type='simple_projection' ,projection_class_embeddings_input_dim=32 ,class_embeddings_concat=A_ ,) A = DDIMScheduler( beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=A_ ,set_alpha_to_one=A_ ,) torch.manual_seed(0 ) A = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=1 ,out_channels=1 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,) torch.manual_seed(0 ) A = ClapTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,projection_dim=32 ,) A = ClapTextModelWithProjection(A_ ) A = RobertaTokenizer.from_pretrained('hf-internal-testing/tiny-random-roberta' ,model_max_length=77 ) A = SpeechTaHifiGanConfig( model_in_dim=8 ,sampling_rate=1_6000 ,upsample_initial_channel=16 ,upsample_rates=[2, 2] ,upsample_kernel_sizes=[4, 4] ,resblock_kernel_sizes=[3, 7] ,resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] ,normalize_before=A_ ,) A = SpeechTaHifiGan(A_ ) A = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'vocoder': vocoder, } return components def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Dict=0 ) -> str: if str(A_ ).startswith('mps' ): A = torch.manual_seed(A_ ) else: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, } return inputs def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, 
-0.00_27, 0.00_33, -0.00_28, 0.00_33] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) A = prompt_embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 3 * ['this is a negative prompt'] A = negative_prompt A = 3 * [inputs['prompt']] # forward A = audioldm_pipe(**A_ ) A = output.audios[0] A = self.get_dummy_inputs(A_ ) A = 3 * [inputs.pop('prompt' )] A = [] for p in [prompt, negative_prompt]: A = audioldm_pipe.tokenizer( A_ ,padding='max_length' ,max_length=audioldm_pipe.tokenizer.model_max_length ,truncation=A_ ,return_tensors='pt' ,) A = text_inputs['input_ids'].to(A_ ) A = audioldm_pipe.text_encoder( A_ ,) A = text_embeds.text_embeds # additional L_2 normalization over each hidden-state A = F.normalize(A_ ,dim=-1 ) embeds.append(A_ ) A , A = embeds # forward A = audioldm_pipe(**A_ ) A = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs(A_ ) A = 'egg cracking' A = audioldm_pipe(**A_ ,negative_prompt=A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) == 256 A = audio[:10] A = np.array( [-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] ) assert np.abs(audio_slice - expected_slice ).max() < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = PNDMScheduler(skip_prk_steps=A_ ) A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = 'A hammer hitting a wooden surface' # test num_waveforms_per_prompt=1 (default) A = audioldm_pipe(A_ ,num_inference_steps=2 ).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts A = 2 A = audioldm_pipe([prompt] * batch_size ,num_inference_steps=2 ).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt A = 2 A = audioldm_pipe(A_ ,num_inference_steps=2 ,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts A = 2 A = audioldm_pipe( [prompt] * batch_size ,num_inference_steps=2 
,num_waveforms_per_prompt=A_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: A = 'cpu' # ensure determinism for the device-dependent torch.Generator A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = audioldm_pipe.vocoder.config.sampling_rate A = self.get_dummy_inputs(A_ ) A = audioldm_pipe(audio_length_in_s=0.0_16 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_16 A = audioldm_pipe(audio_length_in_s=0.0_32 ,**A_ ) A = output.audios[0] assert audio.ndim == 1 assert len(A_ ) / vocoder_sampling_rate == 0.0_32 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: A = self.get_dummy_components() A = AudioLDMPipeline(**A_ ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = ['hey'] A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape assert audio_shape == (1, 256) A = audioldm_pipe.vocoder.config config.model_in_dim *= 2 A = SpeechTaHifiGan(A_ ).to(A_ ) A = audioldm_pipe(A_ ,num_inference_steps=1 ) A = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: self._test_inference_batch_single_identical(test_mean_pixel_difference=A_ ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() ,reason='XFormers attention is only available with CUDA and `xformers` installed' ,) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A_ ) @slow class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : str="cpu" ,A_ : List[str]=torch.floataa ,A_ : str=0 ) -> List[Any]: A = torch.Generator(device=A_ ).manual_seed(A_ ) A = np.random.RandomState(A_ ).standard_normal((1, 8, 128, 16) ) A = torch.from_numpy(A_ ).to(device=A_ ,dtype=A_ ) A = { 'prompt': 'A hammer hitting a wooden surface', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 2.5, } return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = 25 A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[7_7230:7_7240] A = np.array( [-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = AudioLDMPipeline.from_pretrained('cvssp/audioldm' ) A = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) A = audioldm_pipe.to(A_ ) audioldm_pipe.set_progress_bar_config(disable=A_ ) A = self.get_inputs(A_ ) A = audioldm_pipe(**A_ ).audios[0] assert audio.ndim == 1 assert len(A_ ) == 8_1920 A = audio[2_7780:2_7790] A = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 
0.28_86, 0.32_97, 0.22_12] ) A = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3e-2
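# A hedged end-to-end sketch based on the slow test above. The checkpoint name
# comes from this file; left commented because the weight download is large.
#
# from diffusers import AudioLDMPipeline
# pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
# audio = pipe("A hammer hitting a wooden surface", num_inference_steps=25).audios[0]
# # `audio` is a 1-D waveform sampled at pipe.vocoder.config.sampling_rate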
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
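# The fairseq/SentencePiece alignment table earlier in this file is the crux of
# this tokenizer. A minimal standalone sketch of the id mapping, with the
# special-token ids taken from that table (no real SentencePiece model involved):
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # every real spm id is shifted up by one

def spm_id_to_fairseq_id(spm_id: int) -> int:
    # spm id 0 is "<unk>", which fairseq keeps at id 3; all other ids shift by the offset
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert spm_id_to_fairseq_id(3) == 4  # "," sits at spm id 3 / fairseq id 4 in the table
assert spm_id_to_fairseq_id(0) == 3  # unknown pieces map to fairseq "<unk>"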
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_xlm_roberta_xl''': [ '''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaXLConfig''', '''XLMRobertaXLOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaXLForCausalLM''', '''XLMRobertaXLForMaskedLM''', '''XLMRobertaXLForMultipleChoice''', '''XLMRobertaXLForQuestionAnswering''', '''XLMRobertaXLForSequenceClassification''', '''XLMRobertaXLForTokenClassification''', '''XLMRobertaXLModel''', '''XLMRobertaXLPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaXLConfig, XLMRobertaXLOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta_xl import ( XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaXLForCausalLM, XLMRobertaXLForMaskedLM, XLMRobertaXLForMultipleChoice, XLMRobertaXLForQuestionAnswering, XLMRobertaXLForSequenceClassification, XLMRobertaXLForTokenClassification, XLMRobertaXLModel, XLMRobertaXLPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Union[str, Any] ,A_ : List[Any]=13 ,A_ : Optional[Any]=7 ,A_ : str=True ,A_ : List[Any]=True ,A_ : List[str]=True ,A_ : List[Any]=True ,A_ : Optional[int]=99 ,A_ : int=32 ,A_ : int=2 ,A_ : Any=4 ,A_ : Any=37 ,A_ : str="gelu" ,A_ : Tuple=0.1 ,A_ : List[str]=0.1 ,A_ : Union[str, Any]=512 ,A_ : Optional[int]=16 ,A_ : Tuple=2 ,A_ : Optional[Any]=0.02 ,A_ : int=3 ,A_ : Tuple=4 ,A_ : int=None ,A_ : Optional[int]=0 ,) -> Optional[Any]: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope A = projection_dim def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = BertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) A = DPRConfig(projection_dim=self.projection_dim ,**config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : str ,A_ : Tuple ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ,A_ : Tuple ,A_ : List[str] ) -> List[Any]: A = TFDPRContextEncoder(config=A_ ) A = model(A_ ,attention_mask=A_ ,token_type_ids=A_ ) A = model(A_ ,token_type_ids=A_ ) A = model(A_ ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Optional[Any] ,A_ : Tuple ) -> List[str]: A = 
TFDPRQuestionEncoder(config=A_ ) A = model(A_ ,attention_mask=A_ ,token_type_ids=A_ ) A = model(A_ ,token_type_ids=A_ ) A = model(A_ ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : str ,A_ : Optional[int] ,A_ : str ,A_ : str ,A_ : List[Any] ) -> List[str]: A = TFDPRReader(config=A_ ) A = model(A_ ,attention_mask=A_ ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape ,(self.batch_size,) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids} return config, inputs_dict @require_tf class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Any = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) _lowerCamelCase: Any = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {} _lowerCamelCase: Tuple = False _lowerCamelCase: str = False _lowerCamelCase: int = False _lowerCamelCase: List[Any] = False _lowerCamelCase: Union[str, Any] = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = TFDPRModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFDPRContextEncoder.from_pretrained(A_ ) self.assertIsNotNone(A_ ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFDPRContextEncoder.from_pretrained(A_ ) self.assertIsNotNone(A_ ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFDPRQuestionEncoder.from_pretrained(A_ ) self.assertIsNotNone(A_ ) for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = TFDPRReader.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_tf class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' ) A = tf.constant( [[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP] A = model(A_ )[0] # embedding shape = (1, 768) # compare the actual values for a slice. A = tf.constant( [ [ 0.03_23_62_53, 0.12_75_33_35, 0.16_81_85_09, 0.00_27_97_86, 0.3_89_69_33, 0.24_26_49_45, 0.2_17_89_71, -0.02_33_52_27, -0.08_48_19_59, -0.14_32_41_17, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
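# A hedged inference sketch matching the integration test above. The checkpoint
# name comes from this file; left commented because it downloads weights.
#
# from transformers import DPRQuestionEncoderTokenizer, TFDPRQuestionEncoder
# name = "facebook/dpr-question_encoder-single-nq-base"
# tok = DPRQuestionEncoderTokenizer.from_pretrained(name)
# model = TFDPRQuestionEncoder.from_pretrained(name)
# emb = model(**tok("hello, is my dog cute?", return_tensors="tf"))[0]  # shape (1, 768)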
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input _lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def _snake_case ( ): A = _ask_options( 'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: A = get_sagemaker_input() else: A = get_cluster_input() return config def _snake_case ( snake_case__ : Any=None ): if subparsers is not None: A = subparsers.add_parser('config' , description=snake_case__ ) else: A = argparse.ArgumentParser('Accelerate config command' , description=snake_case__ ) parser.add_argument( '--config_file' , default=snake_case__ , help=( 'The path to use to store the config file. Will default to a file named default_config.yaml in the cache ' 'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ' 'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ' 'with \'huggingface\'.' ) , ) if subparsers is not None: parser.set_defaults(func=snake_case__ ) return parser def _snake_case ( snake_case__ : Tuple ): A = get_user_input() if args.config_file is not None: A = args.config_file else: if not os.path.isdir(snake_case__ ): os.makedirs(snake_case__ ) A = default_yaml_config_file if config_file.endswith('.json' ): config.to_json_file(snake_case__ ) else: config.to_yaml_file(snake_case__ ) print(F'accelerate configuration saved at {config_file}' ) def _snake_case ( ): A = config_command_parser() A = parser.parse_args() config_command(snake_case__ ) if __name__ == "__main__": main()
"""simple docstring""" import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset _lowercase = '''bert-base-cased''' _lowercase = '''google/pegasus-xsum''' _lowercase = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] _lowercase = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] _lowercase = '''patrickvonplaten/t5-tiny-random''' _lowercase = '''sshleifer/bart-tiny-random''' _lowercase = '''sshleifer/tiny-mbart''' _lowercase = '''sshleifer/tiny-marian-en-de''' def _snake_case ( snake_case__ : Path , snake_case__ : list ): A = '\n'.join(snake_case__ ) Path(snake_case__ ).open('w' ).writelines(snake_case__ ) def _snake_case ( snake_case__ : str ): for split in ["train", "val", "test"]: _dump_articles(os.path.join(snake_case__ , F'{split}.source' ) , snake_case__ ) _dump_articles(os.path.join(snake_case__ , F'{split}.target' ) , snake_case__ ) return tmp_dir class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] ,) @slow def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Any ) -> Union[str, Any]: A = AutoTokenizer.from_pretrained(A_ ) A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) A = max(len(tokenizer.encode(A_ ) ) for a in ARTICLES ) A = max(len(tokenizer.encode(A_ ) ) for a in SUMMARIES ) A = 4 A = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated A , A = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. A = SeqaSeqDataset( A_ ,data_dir=A_ ,type_path='train' ,max_source_length=A_ ,max_target_length=A_ ,src_lang=A_ ,tgt_lang=A_ ,) A = DataLoader(A_ ,batch_size=2 ,collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(A_ ,A_ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place A = shift_tokens_right(batch['labels'] ,tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ) -> Dict: A = AutoTokenizer.from_pretrained(A_ ) A = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) A = max(len(tokenizer.encode(A_ ) ) for a in ARTICLES ) A = max(len(tokenizer.encode(A_ ) ) for a in SUMMARIES ) A = 4 A = LegacySeqaSeqDataset( A_ ,data_dir=A_ ,type_path='train' ,max_source_length=20 ,max_target_length=A_ ,) A = DataLoader(A_ ,batch_size=2 ,collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: A = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' ) A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) A = tmp_dir.joinpath('train.source' ).open().readlines() A = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(A_ ,A_ ,128 ,A_ ) A = {x.name for x in tmp_dir.iterdir()} A = {x.name for x in save_dir.iterdir()} A = save_dir.joinpath('train.source' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(A_ ) < len(A_ ) assert len(A_ ) == 1 assert len(packed_examples[0] ) == sum(len(A_ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE ,reason='This test requires fairseq' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: if not FAIRSEQ_AVAILABLE: return A , A , A = self._get_dataset(max_len=64 ) A = 64 A = ds.make_dynamic_sampler(A_ ,required_batch_size_multiple=A_ ) A = [len(A_ ) for x in batch_sampler] assert len(set(A_ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(A_ ) == len(A_ ) # no dropped or added examples A = DataLoader(A_ ,batch_sampler=A_ ,collate_fn=ds.collate_fn ,num_workers=2 ) A = [] A = [] for batch in data_loader: A = batch['input_ids'].shape A = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple A = np.product(batch['input_ids'].shape ) num_src_per_batch.append(A_ ) if num_src_tokens > (max_tokens * 1.1): failures.append(A_ ) assert num_src_per_batch[0] == max(A_ ) if failures: raise AssertionError(F'too many tokens in {len(A_ )} batches' ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: A , A , A = self._get_dataset(max_len=512 ) A = 2 A = ds.make_sortish_sampler(A_ ,shuffle=A_ ) A = DataLoader(A_ ,batch_size=A_ ,collate_fn=ds.collate_fn ,num_workers=2 ) A = 
DataLoader(A_ ,batch_size=A_ ,collate_fn=ds.collate_fn ,num_workers=2 ,sampler=A_ ) A = tokenizer.pad_token_id def count_pad_tokens(A_ : Tuple ,A_ : Union[str, Any]="input_ids" ): return [batch[k].eq(A_ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(A_ ,k='labels' ) ) < sum(count_pad_tokens(A_ ,k='labels' ) ) assert sum(count_pad_tokens(A_ ) ) < sum(count_pad_tokens(A_ ) ) assert len(A_ ) == len(A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Optional[Any]=1000 ,A_ : Union[str, Any]=128 ) -> Union[str, Any]: if os.getenv('USE_REAL_DATA' ,A_ ): A = 'examples/seq2seq/wmt_en_ro' A = max_len * 2 * 64 if not Path(A_ ).joinpath('train.len' ).exists(): save_len_file(A_ ,A_ ) else: A = 'examples/seq2seq/test_data/wmt_en_ro' A = max_len * 4 save_len_file(A_ ,A_ ) A = AutoTokenizer.from_pretrained(A_ ) A = SeqaSeqDataset( A_ ,data_dir=A_ ,type_path='train' ,max_source_length=A_ ,max_target_length=A_ ,n_obs=A_ ,) return ds, max_tokens, tokenizer def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A , A , A = self._get_dataset() A = set(DistributedSortishSampler(A_ ,256 ,num_replicas=2 ,rank=0 ,add_extra_examples=A_ ) ) A = set(DistributedSortishSampler(A_ ,256 ,num_replicas=2 ,rank=1 ,add_extra_examples=A_ ) ) assert idsa.intersection(A_ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] ,) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Any ) -> Optional[int]: A = AutoTokenizer.from_pretrained(A_ ,use_fast=A_ ) if tok_name == MBART_TINY: A = SeqaSeqDataset( A_ ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path='train' ,max_source_length=4 ,max_target_length=8 ,src_lang='EN' ,tgt_lang='FR' ,) A = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: A = SeqaSeqDataset( A_ ,data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ,type_path='train' ,max_source_length=4 ,max_target_length=8 ,) A = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(A_ ) == 1 if tok_name == BART_TINY else len(A_ ) == 0
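# The tests above exercise dynamic token batching and "sortish" sampling. Below
# is a small standalone sketch of the sortish idea (an approximation, not the
# library's implementation): shuffle globally, then sort by length within
# fixed-size chunks, so batches are roughly length-homogeneous (little padding
# waste) while epochs still differ.
import random

def sortish_indices(lengths: list[int], chunk_size: int = 512) -> list[int]:
    # Shuffle first so different epochs see different chunk boundaries.
    indices = list(range(len(lengths)))
    random.shuffle(indices)
    # Sort by length inside each chunk: batches drawn in order are length-similar.
    out: list[int] = []
    for start in range(0, len(indices), chunk_size):
        chunk = indices[start : start + chunk_size]
        out.extend(sorted(chunk, key=lambda i: lengths[i], reverse=True))
    return out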
"""simple docstring""" import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Tuple ,A_ : Any ,A_ : int=13 ,A_ : str=7 ,A_ : Tuple=True ,A_ : str=True ,A_ : str=False ,A_ : List[str]=True ,A_ : str=99 ,A_ : str=32 ,A_ : Optional[int]=5 ,A_ : Optional[Any]=4 ,A_ : str=37 ,A_ : Optional[Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Any=0.1 ,A_ : Optional[Any]=512 ,A_ : str=16 ,A_ : int=2 ,A_ : Optional[Any]=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : List[str]=None ,) -> str: A = parent A = batch_size A = seq_length A = is_training A = use_input_mask A = use_token_type_ids A = use_labels A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = type_vocab_size A = type_sequence_label_size A = initializer_range A = num_labels A = num_choices A = scope def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A = None if self.use_input_mask: A = random_attention_mask([self.batch_size, self.seq_length] ) A = None if self.use_token_type_ids: A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A = None A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A = ids_tensor([self.batch_size] ,self.num_choices ) A = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A_ ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : Optional[int] ,A_ : Any ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ) -> List[Any]: A = LlamaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ) A = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : Dict ,) -> List[str]: A = True A = LlamaModel(A_ ) model.to(A_ ) model.eval() A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,) A = model(A_ ,attention_mask=A_ ) 
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Tuple ,A_ : Dict ,) -> Union[str, Any]: A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Any ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : int ,) -> List[Any]: A = True A = True A = LlamaForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,) A = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A = torch.cat([input_ids, next_tokens] ,dim=-1 ) A = torch.cat([input_mask, next_mask] ,dim=-1 ) A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] A = model( A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0] # select random slice A = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A = output_from_no_past[:, -3:, random_slice_idx].detach() A = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = self.prepare_config_and_inputs() ( ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ( A ) , ) = config_and_inputs A = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () _lowerCamelCase: List[Any] = (LlamaForCausalLM,) if is_torch_available() else () _lowerCamelCase: Any = ( { '''feature-extraction''': LlamaModel, '''text-classification''': LlamaForSequenceClassification, '''text-generation''': LlamaForCausalLM, '''zero-shot''': LlamaForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase: int = False _lowerCamelCase: List[str] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: A = LlamaModelTester(self ) A = ConfigTester(self ,config_class=A_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: A = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A = type self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = input_dict['input_ids'] A = input_ids.ne(1 
).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'single_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = 3 A = 'multi_label_classification' A = input_dict['input_ids'] A = input_ids.ne(1 ).to(A_ ) A = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A = LlamaForSequenceClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,attention_mask=A_ ,labels=A_ ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('LLaMA buffers include complex numbers, which breaks this test' ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: pass @parameterized.expand([('linear',), ('dynamic',)] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> str: A , A = self.model_tester.prepare_config_and_inputs_for_common() A = ids_tensor([1, 10] ,config.vocab_size ) A = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = LlamaModel(A_ ) original_model.to(A_ ) original_model.eval() A = original_model(A_ ).last_hidden_state A = original_model(A_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A = {'type': scaling_type, 'factor': 10.0} A = LlamaModel(A_ ) scaled_model.to(A_ ) scaled_model.eval() A = scaled_model(A_ ).last_hidden_state A = scaled_model(A_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A_ ,A_ ,atol=1e-5 ) ) @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' 
) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' ,device_map='auto' ) A = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) # Expected mean on dim = -1 A = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( 'Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test' ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = [1, 306, 4658, 278, 6593, 310, 2834, 338] A = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' ,device_map='auto' ) A = model(torch.tensor(A_ ) ) A = torch.tensor( [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,A_ ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,A_ ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip('Model is curently gated' ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi' A = 'Simply put, the theory of relativity states that ' A = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' ) A = tokenizer.encode(A_ ,return_tensors='pt' ) A = LlamaForCausalLM.from_pretrained( 'meta-llama/Llama-2-13b-chat-hf' ,device_map='sequential' ,use_safetensors=A_ ) # greedy generation outputs A = model.generate(A_ ,max_new_tokens=64 ,top_p=A_ ,temperature=1 ,do_sample=A_ ) A = tokenizer.decode(generated_ids[0] ,skip_special_tokens=A_ ) self.assertEqual(A_ ,A_ )
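The decoder-cache test above verifies that logits produced with `past_key_values` agree with a full forward pass. A minimal sketch of the same consistency check, assuming a generic causal LM; `sshleifer/tiny-gpt2` is just an illustrative tiny checkpoint:

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
model.eval()
input_ids = torch.randint(0, model.config.vocab_size, (1, 8))
with torch.no_grad():
    full_logits = model(input_ids).logits
    # Incremental decoding: run the prefix, then only the last token with the cache.
    prefix = model(input_ids[:, :-1], use_cache=True)
    step = model(input_ids[:, -1:], past_key_values=prefix.past_key_values)
# The last-position logits must match whether or not the cache was used.
assert torch.allclose(full_logits[:, -1], step.logits[:, -1], atol=1e-4)
```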
91
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase = { '''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''BloomTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BloomForCausalLM''', '''BloomModel''', '''BloomPreTrainedModel''', '''BloomForSequenceClassification''', '''BloomForTokenClassification''', '''BloomForQuestionAnswering''', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
1
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def _snake_case ( ): A = os.path.dirname(os.path.realpath(snake_case__ ) ) A = os.path.join(snake_case__ , 'words.txt' ) A = '' with open(snake_case__ ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(snake_case__ ) if __name__ == "__main__": print(solution())
91
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = '''mobilenet_v1''' def __init__( self : Optional[int] ,A_ : Optional[int]=3 ,A_ : Any=224 ,A_ : List[Any]=1.0 ,A_ : Union[str, Any]=8 ,A_ : Union[str, Any]="relu6" ,A_ : Optional[Any]=True ,A_ : List[str]=0.9_99 ,A_ : int=0.02 ,A_ : int=0.0_01 ,**A_ : Union[str, Any] ,) -> Dict: super().__init__(**A_ ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) A = num_channels A = image_size A = depth_multiplier A = min_depth A = hidden_act A = tf_padding A = classifier_dropout_prob A = initializer_range A = layer_norm_eps class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[int] = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> float: return 1e-4
91
1
"""simple docstring""" def _snake_case ( ): A = 0 for i in range(1 , 1001 ): total += i**i return str(snake_case__ )[-10:] if __name__ == "__main__": print(solution())
91
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowercase = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, 
FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
1
"""simple docstring""" # Lint as: python3 import itertools import os import re _lowercase = re.compile(r'''([A-Z]+)([A-Z][a-z])''') _lowercase = re.compile(r'''([a-z\d])([A-Z])''') _lowercase = re.compile(r'''(?<!_)_(?!_)''') _lowercase = re.compile(r'''(_{2,})''') _lowercase = r'''^\w+(\.\w+)*$''' _lowercase = r'''<>:/\|?*''' def _snake_case ( snake_case__ : Optional[Any] ): A = _uppercase_uppercase_re.sub(r'\1_\2' , snake_case__ ) A = _lowercase_uppercase_re.sub(r'\1_\2' , snake_case__ ) return name.lower() def _snake_case ( snake_case__ : List[Any] ): A = _single_underscore_re.split(snake_case__ ) A = [_multiple_underscores_re.split(snake_case__ ) for n in name] return "".join(n.capitalize() for n in itertools.chain.from_iterable(snake_case__ ) if n != '' ) def _snake_case ( snake_case__ : int ): if os.path.basename(snake_case__ ) != name: raise ValueError(F'Should be a dataset name, not a path: {name}' ) return camelcase_to_snakecase(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : str ): if os.path.basename(snake_case__ ) != name: raise ValueError(F'Should be a dataset name, not a path: {name}' ) if not re.match(_split_re , snake_case__ ): raise ValueError(F'Split name should match \'{_split_re}\'\' but got \'{split}\'.' ) return F'{filename_prefix_for_name(snake_case__ )}-{split}' def _snake_case ( snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : List[Any]=None ): A = filename_prefix_for_split(snake_case__ , snake_case__ ) if filetype_suffix: prefix += F'.{filetype_suffix}' A = os.path.join(snake_case__ , snake_case__ ) return F'{filepath}*' def _snake_case ( snake_case__ : str , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : str=None , snake_case__ : Union[str, Any]=None ): A = filename_prefix_for_split(snake_case__ , snake_case__ ) A = os.path.join(snake_case__ , snake_case__ ) if shard_lengths: A = len(snake_case__ ) A = [F'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(snake_case__ )] if filetype_suffix: A = [filename + F'.{filetype_suffix}' for filename in filenames] return filenames else: A = prefix if filetype_suffix: filename += F'.{filetype_suffix}' return [filename]
91
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _lowercase = datasets.utils.logging.get_logger(__name__) _lowercase = ['''names''', '''prefix'''] _lowercase = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols'''] _lowercase = ['''encoding_errors''', '''on_bad_lines'''] _lowercase = ['''date_format'''] @dataclass class lowerCAmelCase_ ( datasets.BuilderConfig ): '''simple docstring''' _lowerCamelCase: str = "," _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[Union[int, List[int], str]] = "infer" _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[List[str]] = None _lowerCamelCase: Optional[Union[int, str, List[int], List[str]]] = None _lowerCamelCase: Optional[Union[List[int], List[str]]] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: Optional[Literal["c", "python", "pyarrow"]] = None _lowerCamelCase: Dict[Union[int, str], Callable[[Any], Any]] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: Optional[list] = None _lowerCamelCase: bool = False _lowerCamelCase: Optional[Union[int, List[int]]] = None _lowerCamelCase: Optional[int] = None _lowerCamelCase: Optional[Union[str, List[str]]] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: bool = True _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = "." _lowerCamelCase: Optional[str] = None _lowerCamelCase: str = '"' _lowerCamelCase: int = 0 _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: Optional[str] = None _lowerCamelCase: bool = True _lowerCamelCase: bool = True _lowerCamelCase: int = 0 _lowerCamelCase: bool = True _lowerCamelCase: bool = False _lowerCamelCase: Optional[str] = None _lowerCamelCase: int = 10000 _lowerCamelCase: Optional[datasets.Features] = None _lowerCamelCase: Optional[str] = "strict" _lowerCamelCase: Literal["error", "warn", "skip"] = "error" _lowerCamelCase: Optional[str] = None def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: if self.delimiter is not None: A = self.delimiter if self.column_names is not None: A = self.column_names @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = { 'sep': self.sep, 'header': self.header, 'names': self.names, 'index_col': self.index_col, 'usecols': self.usecols, 'prefix': self.prefix, 'mangle_dupe_cols': self.mangle_dupe_cols, 'engine': self.engine, 'converters': self.converters, 'true_values': self.true_values, 'false_values': self.false_values, 'skipinitialspace': self.skipinitialspace, 'skiprows': self.skiprows, 'nrows': self.nrows, 'na_values': self.na_values, 'keep_default_na': self.keep_default_na, 'na_filter': self.na_filter, 'verbose': self.verbose, 'skip_blank_lines': self.skip_blank_lines, 'thousands': self.thousands, 'decimal': self.decimal, 'lineterminator': self.lineterminator, 'quotechar': self.quotechar, 'quoting': self.quoting, 'escapechar': self.escapechar, 'comment': self.comment, 'encoding': self.encoding, 'dialect': self.dialect, 'error_bad_lines': self.error_bad_lines, 'warn_bad_lines': self.warn_bad_lines, 'skipfooter': self.skipfooter, 'doublequote': self.doublequote, 'memory_map': 
self.memory_map, 'float_precision': self.float_precision, 'chunksize': self.chunksize, 'encoding_errors': self.encoding_errors, 'on_bad_lines': self.on_bad_lines, 'date_format': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() ,A_ ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class lowerCAmelCase_ ( datasets.ArrowBasedBuilder ): '''simple docstring''' _lowerCamelCase: Any = CsvConfig def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return datasets.DatasetInfo(features=self.config.features ) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ) -> str: if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A = dl_manager.download_and_extract(self.config.data_files ) if isinstance(A_ ,(str, list, tuple) ): A = data_files if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN ,gen_kwargs={'files': files} )] A = [] for split_name, files in data_files.items(): if isinstance(A_ ,A_ ): A = [files] A = [dl_manager.iter_files(A_ ) for file in files] splits.append(datasets.SplitGenerator(name=A_ ,gen_kwargs={'files': files} ) ) return splits def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : pa.Table ) -> pa.Table: if self.config.features is not None: A = self.config.features.arrow_schema if all(not require_storage_cast(A_ ) for feature in self.config.features.values() ): # cheaper cast A = pa.Table.from_arrays([pa_table[field.name] for field in schema] ,schema=A_ ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A = table_cast(A_ ,A_ ) return pa_table def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ) -> List[Any]: A = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(A_ ) else object for name, dtype, feature in zip(schema.names ,schema.types ,self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(A_ ) ): A = pd.read_csv(A_ ,iterator=A_ ,dtype=A_ ,**self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(A_ ): A = pa.Table.from_pandas(A_ ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(A_ ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(A_ )}: {e}' ) raise
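This builder is what `load_dataset("csv", ...)` dispatches to; most of the config fields above pass straight through to `pandas.read_csv`. A typical call (file names are placeholders):

```python
from datasets import load_dataset

# Parsing options such as sep/skiprows flow through CsvConfig to pandas.read_csv.
ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=",", skiprows=0)
print(ds["train"][0])
```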
91
1
"""simple docstring""" def _snake_case ( snake_case__ : List[str] , snake_case__ : Optional[int] ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) A = (boundary[1] - boundary[0]) / steps A = boundary[0] A = boundary[1] A = make_points(snake_case__ , snake_case__ , snake_case__ ) A = 0.0 y += (h / 2.0) * f(snake_case__ ) for i in x_i: # print(i) y += h * f(snake_case__ ) y += (h / 2.0) * f(snake_case__ ) return y def _snake_case ( snake_case__ : Any , snake_case__ : List[Any] , snake_case__ : Any ): A = a + h while x < (b - h): yield x A = x + h def _snake_case ( snake_case__ : Optional[Any] ): # enter your function here A = (x - 0) * (x - 0) return y def _snake_case ( ): A = 0.0 # Lower bound of integration A = 1.0 # Upper bound of integration A = 10.0 # define number of steps or resolution A = [a, b] # define boundary of integration A = method_a(snake_case__ , snake_case__ ) print(F'y = {y}' ) if __name__ == "__main__": main()
91
"""simple docstring""" from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : Any ,A_ : Callable ,A_ : Optional[Features] = None ,A_ : str = None ,A_ : bool = False ,A_ : bool = False ,A_ : Optional[dict] = None ,A_ : Optional[int] = None ,**A_ : int ,) -> str: super().__init__( features=A_ ,cache_dir=A_ ,keep_in_memory=A_ ,streaming=A_ ,num_proc=A_ ,**A_ ,) A = Generator( cache_dir=A_ ,features=A_ ,generator=A_ ,gen_kwargs=A_ ,**A_ ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: # Build iterable dataset if self.streaming: A = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: A = None A = None A = None A = None self.builder.download_and_prepare( download_config=A_ ,download_mode=A_ ,verification_mode=A_ ,base_path=A_ ,num_proc=self.num_proc ,) A = self.builder.as_dataset( split='train' ,verification_mode=A_ ,in_memory=self.keep_in_memory ) return dataset
91
1
"""simple docstring""" from importlib import import_module from .logging import get_logger _lowercase = get_logger(__name__) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Dict ,A_ : Tuple ,A_ : str=None ) -> Union[str, Any]: A = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('__' ): setattr(self ,A_ ,getattr(A_ ,A_ ) ) A = module._original_module if isinstance(A_ ,_PatchedModuleObj ) else module class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: Union[str, Any] = [] def __init__( self : Optional[int] ,A_ : List[Any] ,A_ : str ,A_ : List[str] ,A_ : Any=None ) -> Tuple: A = obj A = target A = new A = target.split('.' )[0] A = {} A = attrs or [] def __enter__( self : Optional[int] ) -> List[str]: *A , A = self.target.split('.' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(A_ ) ): try: A = import_module('.'.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): A = getattr(self.obj ,A_ ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(A_ ,_PatchedModuleObj ) and obj_attr._original_module is submodule) ): A = obj_attr # patch at top level setattr(self.obj ,A_ ,_PatchedModuleObj(A_ ,attrs=self.attrs ) ) A = getattr(self.obj ,A_ ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(A_ ,A_ ,_PatchedModuleObj(getattr(A_ ,A_ ,A_ ) ,attrs=self.attrs ) ) A = getattr(A_ ,A_ ) # finally set the target attribute setattr(A_ ,A_ ,self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: A = getattr(import_module('.'.join(A_ ) ) ,A_ ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". if getattr(self.obj ,A_ ) is attr_value: A = getattr(self.obj ,A_ ) setattr(self.obj ,A_ ,self.new ) elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open" A = globals()['__builtins__'][target_attr] setattr(self.obj ,A_ ,self.new ) else: raise RuntimeError(F'Tried to patch attribute {target_attr} instead of a submodule.' ) def __exit__( self : Optional[int] ,*A_ : Optional[int] ) -> int: for attr in list(self.original ): setattr(self.obj ,A_ ,self.original.pop(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: self.__enter__() self._active_patches.append(self ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
91
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: A = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A_ ,'tf_padding' ) ) self.parent.assertTrue(hasattr(A_ ,'depth_multiplier' ) ) class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] ,A_ : Optional[int] ,A_ : Optional[int]=13 ,A_ : Tuple=3 ,A_ : Any=32 ,A_ : Optional[Any]=0.25 ,A_ : Union[str, Any]=8 ,A_ : List[str]=True ,A_ : List[Any]=1024 ,A_ : str=32 ,A_ : Tuple="relu6" ,A_ : Optional[int]=0.1 ,A_ : int=0.02 ,A_ : int=True ,A_ : Any=True ,A_ : Optional[int]=10 ,A_ : List[Any]=None ,) -> List[Any]: A = parent A = batch_size A = num_channels A = image_size A = depth_multiplier A = min_depth A = tf_padding A = int(last_hidden_size * depth_multiplier ) A = output_stride A = hidden_act A = classifier_dropout_prob A = use_labels A = is_training A = num_labels A = initializer_range A = scope def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A = None A = None if self.use_labels: A = ids_tensor([self.batch_size] ,self.num_labels ) A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) A = self.get_config() return config, pixel_values, labels, pixel_labels def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: return MobileNetVaConfig( num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : int ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = MobileNetVaModel(config=A_ ) model.to(A_ ) model.eval() A = model(A_ ) self.parent.assertEqual( result.last_hidden_state.shape ,( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Union[str, Any] ) -> Tuple: A = self.num_labels A = MobileNetVaForImageClassification(A_ ) model.to(A_ ) model.eval() A = model(A_ ,labels=A_ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: A = self.prepare_config_and_inputs() A , A , A , A = config_and_inputs A = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = 
(MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () _lowerCamelCase: Any = ( {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification} if is_torch_available() else {} ) _lowerCamelCase: List[str] = False _lowerCamelCase: Optional[Any] = False _lowerCamelCase: List[Any] = False _lowerCamelCase: Union[str, Any] = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: A = MobileNetVaModelTester(self ) A = MobileNetVaConfigTester(self ,config_class=A_ ,has_text_modality=A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='MobileNetV1 does not use inputs_embeds' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: pass @unittest.skip(reason='MobileNetV1 does not support input and output embeddings' ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: pass @unittest.skip(reason='MobileNetV1 does not output attentions' ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: pass def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = model_class(A_ ) A = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A = [*signature.parameters.keys()] A = ['pixel_values'] self.assertListEqual(arg_names[:1] ,A_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: def check_hidden_states_output(A_ : int ,A_ : Dict ,A_ : str ): A = model_class(A_ ) model.to(A_ ) model.eval() with torch.no_grad(): A = model(**self._prepare_for_class(A_ ,A_ ) ) A = outputs.hidden_states A = 26 self.assertEqual(len(A_ ) ,A_ ) A , A = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A = True check_hidden_states_output(A_ ,A_ ,A_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A = True check_hidden_states_output(A_ ,A_ ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: A = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> int: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A = MobileNetVaModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) def _snake_case ( ): A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @cached_property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: return ( MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: A = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(A_ ) A = self.default_image_processor A = prepare_img() A = image_processor(images=A_ ,return_tensors='pt' ).to(A_ ) # forward pass with torch.no_grad(): A = model(**A_ ) # verify the logits A = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape ,A_ ) A = torch.tensor([-4.17_39, -1.12_33, 3.12_05] ).to(A_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A_ ,atol=1e-4 ) )
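The integration test above drives the checkpoint by hand; for everyday inference the same model is simplest to run through a pipeline (the image path is a placeholder):

```python
from transformers import pipeline

classifier = pipeline("image-classification", model="google/mobilenet_v1_1.0_224")
print(classifier("cat.jpg")[0])  # e.g. {'label': ..., 'score': ...}
```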
91
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Dict = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline''' def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str]=0 ) -> str: A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) ) A = np.random.RandomState(A_ ) A = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) # warmup pass to apply optimizations A = pipe(**self.get_dummy_inputs() ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) 
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' ) A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=A_ ) A = self.get_dummy_inputs() A = pipe(**A_ ).images A = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: A = ort.SessionOptions() A = False return options def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) # using the PNDM scheduler by default A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: A = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg' ) A = init_image.resize((768, 512) ) A = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' ) A = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,) pipe.set_progress_bar_config(disable=A_ ) A = 'A fantasy landscape, trending on artstation' A = np.random.RandomState(0 ) A = pipe( prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,) A = output.images A = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] ) # TODO: lower the tolerance after finding the cause of 
onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
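Outside the test harness, the pipeline (spelled `OnnxStableDiffusionImg2ImgPipeline` upstream) is used roughly as below; the checkpoint and image URL mirror the ones in the test, and the output settings are illustrative:

```python
from diffusers import OnnxStableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
)
url = (
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
)
init_image = load_image(url).resize((768, 512))
image = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image, strength=0.75, guidance_scale=7.5,
).images[0]
image.save("fantasy_landscape.png")
```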
91
1
"""simple docstring""" import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available _lowercase = logging.getLogger(__name__) @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str _lowerCamelCase: List[str] _lowerCamelCase: Optional[List[str]] @dataclass class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: List[int] _lowerCamelCase: List[int] _lowerCamelCase: Optional[List[int]] = None _lowerCamelCase: Optional[List[int]] = None class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = '''train''' _lowerCamelCase: Any = '''dev''' _lowerCamelCase: Optional[Any] = '''test''' class lowerCAmelCase_ : '''simple docstring''' @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Dict ,A_ : Union[Split, str] ) -> List[InputExample]: raise NotImplementedError @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : str ) -> List[str]: raise NotImplementedError @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[InputExample] ,A_ : List[str] ,A_ : int ,A_ : PreTrainedTokenizer ,A_ : Optional[int]=False ,A_ : Any="[CLS]" ,A_ : Optional[Any]=1 ,A_ : List[Any]="[SEP]" ,A_ : List[str]=False ,A_ : Union[str, Any]=False ,A_ : List[Any]=0 ,A_ : Optional[Any]=0 ,A_ : str=-100 ,A_ : Union[str, Any]=0 ,A_ : int=True ,) -> List[InputFeatures]: A = {label: i for i, label in enumerate(A_ )} A = [] for ex_index, example in enumerate(A_ ): if ex_index % 1_0000 == 0: logger.info('Writing example %d of %d' ,A_ ,len(A_ ) ) A = [] A = [] for word, label in zip(example.words ,example.labels ): A = tokenizer.tokenize(A_ ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(A_ ) > 0: tokens.extend(A_ ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A_ ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. A = tokenizer.num_special_tokens_to_add() if len(A_ ) > max_seq_length - special_tokens_count: A = tokens[: (max_seq_length - special_tokens_count)] A = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] A = [sequence_a_segment_id] * len(A_ ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: A = [cls_token] + tokens A = [pad_token_label_id] + label_ids A = [cls_token_segment_id] + segment_ids A = tokenizer.convert_tokens_to_ids(A_ ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. A = [1 if mask_padding_with_zero else 0] * len(A_ ) # Zero-pad up to the sequence length. A = max_seq_length - len(A_ ) if pad_on_left: A = ([pad_token] * padding_length) + input_ids A = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask A = ([pad_token_segment_id] * padding_length) + segment_ids A = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(A_ ) == max_seq_length assert len(A_ ) == max_seq_length assert len(A_ ) == max_seq_length assert len(A_ ) == max_seq_length if ex_index < 5: logger.info('*** Example ***' ) logger.info('guid: %s' ,example.guid ) logger.info('tokens: %s' ,' '.join([str(A_ ) for x in tokens] ) ) logger.info('input_ids: %s' ,' '.join([str(A_ ) for x in input_ids] ) ) logger.info('input_mask: %s' ,' '.join([str(A_ ) for x in input_mask] ) ) logger.info('segment_ids: %s' ,' '.join([str(A_ ) for x in segment_ids] ) ) logger.info('label_ids: %s' ,' '.join([str(A_ ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: A = None features.append( InputFeatures( input_ids=A_ ,attention_mask=A_ ,token_type_ids=A_ ,label_ids=A_ ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[InputFeatures] _lowerCamelCase: int = nn.CrossEntropyLoss().ignore_index def __init__( self : Tuple ,A_ : TokenClassificationTask ,A_ : str ,A_ : PreTrainedTokenizer ,A_ : List[str] ,A_ : str ,A_ : Optional[int] = None ,A_ : Optional[Any]=False ,A_ : Split = Split.train ,) -> Union[str, Any]: # Load data features from cache or dataset file A = os.path.join( A_ ,'cached_{}_{}_{}'.format(mode.value ,tokenizer.__class__.__name__ ,str(A_ ) ) ,) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
A = cached_features_file + '.lock' with FileLock(A_ ): if os.path.exists(A_ ) and not overwrite_cache: logger.info(F'Loading features from cached file {cached_features_file}' ) A = torch.load(A_ ) else: logger.info(F'Creating features from dataset file at {data_dir}' ) A = token_classification_task.read_examples_from_file(A_ ,A_ ) # TODO clean up all this to leverage built-in features of tokenizers A = token_classification_task.convert_examples_to_features( A_ ,A_ ,A_ ,A_ ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=A_ ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) logger.info(F'Saving features into cached file {cached_features_file}' ) torch.save(self.features ,A_ ) def __len__( self : List[str] ) -> str: return len(self.features ) def __getitem__( self : List[str] ,A_ : int ) -> InputFeatures: return self.features[i] if is_tf_available(): import tensorflow as tf class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: List[InputFeatures] _lowerCamelCase: int = -100 def __init__( self : List[Any] ,A_ : TokenClassificationTask ,A_ : str ,A_ : PreTrainedTokenizer ,A_ : List[str] ,A_ : str ,A_ : Optional[int] = None ,A_ : Union[str, Any]=False ,A_ : Split = Split.train ,) -> Union[str, Any]: A = token_classification_task.read_examples_from_file(A_ ,A_ ) # TODO clean up all this to leverage built-in features of tokenizers A = token_classification_task.convert_examples_to_features( A_ ,A_ ,A_ ,A_ ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=A_ ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: A = tf.data.Dataset.from_generator( A_ ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) ,( {'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )}, tf.TensorShape([None] ), ) ,) else: A = tf.data.Dataset.from_generator( A_ ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) ,( { 'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] ), 'token_type_ids': tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) ,) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: A = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : Optional[int] ) -> Optional[Any]: return len(self.features ) def __getitem__( self : Optional[Any] ,A_ : Union[str, Any] ) -> InputFeatures: return self.features[i]
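The TODO in this sample points at fast-tokenizer features that remove most of the manual alignment: `word_ids()` maps every subword back to its source word, so label propagation becomes a lookup. A sketch (label ids are illustrative; it labels every subword, whereas the code above labels only the first subword of each word):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
words = ["New", "York", "City"]
word_labels = [5, 6, 6]  # e.g. B-LOC, I-LOC, I-LOC as ids
encoding = tokenizer(words, is_split_into_words=True)
# word_ids() maps each token to its source word; None marks special tokens.
aligned = [word_labels[w] if w is not None else -100 for w in encoding.word_ids()]
print(encoding.word_ids(), aligned)  # typically [None, 0, 1, 2, None] [-100, 5, 6, 6, -100]
```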
91
"""simple docstring""" from __future__ import annotations def _snake_case ( snake_case__ : tuple[int, int] , snake_case__ : int ): A , A = position A = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] A = [] for position in positions: A , A = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(snake_case__ ) return permissible_positions def _snake_case ( snake_case__ : list[list[int]] ): return not any(elem == 0 for row in board for elem in row ) def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : tuple[int, int] , snake_case__ : int ): if is_complete(snake_case__ ): return True for position in get_valid_pos(snake_case__ , len(snake_case__ ) ): A , A = position if board[y][x] == 0: A = curr + 1 if open_knight_tour_helper(snake_case__ , snake_case__ , curr + 1 ): return True A = 0 return False def _snake_case ( snake_case__ : int ): A = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )] for i in range(snake_case__ ): for j in range(snake_case__ ): A = 1 if open_knight_tour_helper(snake_case__ , (i, j) , 1 ): return board A = 0 A = F'Open Kight Tour cannot be performed on a board of size {n}' raise ValueError(snake_case__ ) if __name__ == "__main__": import doctest doctest.testmod()
91
1
"""simple docstring""" def _snake_case ( snake_case__ : int , snake_case__ : int ): while b: A , A = b, a % b return a def _snake_case ( snake_case__ : int , snake_case__ : int ): return a if b == 0 else euclidean_gcd_recursive(snake_case__ , a % b ) def _snake_case ( ): print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' ) print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' ) print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' ) print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' ) print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' ) print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' ) print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' ) print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' ) print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' ) print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' ) if __name__ == "__main__": main()
"""simple docstring""" import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: Optional[int] = BlenderbotSmallTokenizer _lowerCamelCase: List[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() A = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__'] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', ''] A = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'} A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(A_ ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ,**A_ : Union[str, Any] ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Tuple ) -> List[Any]: A = 'adapt act apte' A = 'adapt act apte' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: A = BlenderbotSmallTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) A = 'adapt act apte' A = ['adapt', 'act', 'ap@@', 'te'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] A = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) assert tok('sam' ).input_ids == [1384] A = 'I am a small frog.' A = tok([src_text] ,padding=A_ ,truncation=A_ )['input_ids'] A = tok.batch_decode(A_ ,skip_special_tokens=A_ ,clean_up_tokenization_spaces=A_ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: A = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) A = 'I am a small frog .' A = '.' A = tok(A_ )['input_ids'] A = tok(A_ )['input_ids'] assert encoded[-1] == encoded_dot[0]
"""simple docstring""" class lowerCAmelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] ) -> None: A = {} # Mapping from char to TrieNode A = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : list[str] ) -> None: for word in words: self.insert(A_ ) def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> None: A = self for char in word: if char not in curr.nodes: A = TrieNode() A = curr.nodes[char] A = True def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : str ) -> bool: A = self for char in word: if char not in curr.nodes: return False A = curr.nodes[char] return curr.is_leaf def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : str ) -> None: def _delete(A_ : TrieNode ,A_ : str ,A_ : int ) -> bool: if index == len(A_ ): # If word does not exist if not curr.is_leaf: return False A = False return len(curr.nodes ) == 0 A = word[index] A = curr.nodes.get(A_ ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted A = _delete(A_ ,A_ ,index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self ,A_ ,0 ) def _snake_case ( snake_case__ : TrieNode , snake_case__ : str ): if node.is_leaf: print(snake_case__ , end=' ' ) for key, value in node.nodes.items(): print_words(snake_case__ , word + key ) def _snake_case ( ): A = 'banana bananas bandana band apple all beast'.split() A = TrieNode() root.insert_many(snake_case__ ) # print_words(root, "") assert all(root.find(snake_case__ ) for word in words ) assert root.find('banana' ) assert not root.find('bandanas' ) assert not root.find('apps' ) assert root.find('apple' ) assert root.find('all' ) root.delete('all' ) assert not root.find('all' ) root.delete('banana' ) assert not root.find('banana' ) assert root.find('bananas' ) return True def _snake_case ( snake_case__ : str , snake_case__ : bool ): print(str(snake_case__ ) , 'works!' if passes else 'doesn\'t work :(' ) def _snake_case ( ): assert test_trie() def _snake_case ( ): print_results('Testing trie functionality' , test_trie() ) if __name__ == "__main__": main()
"""simple docstring""" from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: List[str] = ['''image_processor''', '''tokenizer'''] _lowerCamelCase: Optional[int] = '''Pix2StructImageProcessor''' _lowerCamelCase: Dict = ('''T5Tokenizer''', '''T5TokenizerFast''') def __init__( self : Optional[int] ,A_ : List[str] ,A_ : Optional[int] ) -> int: A = False super().__init__(A_ ,A_ ) def __call__( self : Any ,A_ : List[str]=None ,A_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,A_ : bool = True ,A_ : Union[bool, str, PaddingStrategy] = False ,A_ : Union[bool, str, TruncationStrategy] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = 2048 ,A_ : int = 0 ,A_ : Optional[int] = None ,A_ : Optional[bool] = None ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = False ,A_ : bool = True ,A_ : Optional[Union[str, TensorType]] = None ,**A_ : Tuple ,) -> BatchEncoding: if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None and not self.image_processor.is_vqa: A = self.tokenizer A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) return text_encoding if not self.image_processor.is_vqa: # add pixel_values A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,**A_ ) else: # add pixel_values and bbox A = self.image_processor( A_ ,return_tensors=A_ ,max_patches=A_ ,header_text=A_ ,**A_ ) if text is not None and not self.image_processor.is_vqa: A = self.tokenizer( text=A_ ,add_special_tokens=A_ ,padding=A_ ,truncation=A_ ,max_length=A_ ,stride=A_ ,pad_to_multiple_of=A_ ,return_attention_mask=A_ ,return_overflowing_tokens=A_ ,return_special_tokens_mask=A_ ,return_offsets_mapping=A_ ,return_token_type_ids=A_ ,return_length=A_ ,verbose=A_ ,return_tensors=A_ ,**A_ ,) if "attention_mask" in text_encoding: A = text_encoding.pop('attention_mask' ) if "input_ids" in text_encoding: A = text_encoding.pop('input_ids' ) else: A = None if text_encoding is not None: encoding_image_processor.update(A_ ) return encoding_image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]: return self.tokenizer.batch_decode(*A_ ,**A_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : Tuple ,**A_ : List[str] ) -> Any: return self.tokenizer.decode(*A_ ,**A_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: A = self.tokenizer.model_input_names A = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import argparse import datetime def _snake_case ( snake_case__ : str ): A = { '0': 'Sunday', '1': 'Monday', '2': 'Tuesday', '3': 'Wednesday', '4': 'Thursday', '5': 'Friday', '6': 'Saturday', } A = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0} # Validate if not 0 < len(snake_case__ ) < 11: raise ValueError('Must be 10 characters long' ) # Get month A = int(date_input[0] + date_input[1] ) # Validate if not 0 < m < 13: raise ValueError('Month must be between 1 - 12' ) A = date_input[2] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get day A = int(date_input[3] + date_input[4] ) # Validate if not 0 < d < 32: raise ValueError('Date must be between 1 - 31' ) # Get second separator A = date_input[5] # Validate if sep_a not in ["-", "/"]: raise ValueError('Date separator must be \'-\' or \'/\'' ) # Get year A = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] ) # Arbitrary year range if not 45 < y < 8500: raise ValueError( 'Year out of range. There has to be some sort of limit...right?' ) # Get datetime obj for validation A = datetime.date(int(snake_case__ ) , int(snake_case__ ) , int(snake_case__ ) ) # Start math if m <= 2: A = y - 1 A = m + 12 # maths var A = int(str(snake_case__ )[:2] ) A = int(str(snake_case__ )[2:] ) A = int(2.6 * m - 5.39 ) A = int(c / 4 ) A = int(k / 4 ) A = int(d + k ) A = int(t + u + v + x ) A = int(z - (2 * c) ) A = round(w % 7 ) # End math # Validate math if f != convert_datetime_days[dt_ck.weekday()]: raise AssertionError('The date was evaluated incorrectly. Contact developer.' ) # Response A = F'Your date {date_input}, is a {days[str(snake_case__ )]}!' return response if __name__ == "__main__": import doctest doctest.testmod() _lowercase = argparse.ArgumentParser( description=( '''Find out what day of the week nearly any date is or was. Enter ''' '''date as a string in the mm-dd-yyyy or mm/dd/yyyy format''' ) ) parser.add_argument( '''date_input''', type=str, help='''Date as a string (mm-dd-yyyy or mm/dd/yyyy)''' ) _lowercase = parser.parse_args() zeller(args.date_input)
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowercase = logging.get_logger(__name__) _lowercase = '''▁''' _lowercase = {'''vocab_file''': '''sentencepiece.bpe.model'''} _lowercase = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } _lowercase = { '''xlm-roberta-base''': 5_12, '''xlm-roberta-large''': 5_12, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_12, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_12, '''xlm-roberta-large-finetuned-conll03-english''': 5_12, '''xlm-roberta-large-finetuned-conll03-german''': 5_12, } class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' _lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase: Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase: Any = ['''input_ids''', '''attention_mask'''] def __init__( self : Union[str, Any] ,A_ : str ,A_ : str="<s>" ,A_ : Any="</s>" ,A_ : Tuple="</s>" ,A_ : Any="<s>" ,A_ : Optional[Any]="<unk>" ,A_ : int="<pad>" ,A_ : str="<mask>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : Optional[int] ,) -> None: # Mask token behave like a normal word, i.e. include the space before it A = AddedToken(A_ ,lstrip=A_ ,rstrip=A_ ) if isinstance(A_ ,A_ ) else mask_token A = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,sep_token=A_ ,cls_token=A_ ,pad_token=A_ ,mask_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,) A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A_ ) ) A = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token A = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab A = 1 A = len(self.sp_model ) + self.fairseq_offset A = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Union[str, Any] ) -> Any: A = self.__dict__.copy() A = None A = self.sp_model.serialized_model_proto() return state def __setstate__( self : str ,A_ : str ) -> Optional[Any]: A = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): A = {} A = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A = [self.cls_token_id] A = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ ) if token_ids_a is None: return [1] + ([0] * len(A_ )) + [1] return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[int] ,A_ : Optional[List[int]] = None ) -> List[int]: A = [self.sep_token_id] A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]: return self.sp_model.encode(A_ ,out_type=A_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Tuple: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] A = self.sp_model.PieceToId(A_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]: A = ''.join(A_ ).replace(A_ ,' ' ).strip() return out_string def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return A = os.path.join( A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,A_ ) elif not os.path.isfile(self.vocab_file ): with open(A_ ,'wb' ) as fi: A = self.sp_model.serialized_model_proto() fi.write(A_ ) return (out_vocab_file,)
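The alignment table in the comments above boils down to a fixed offset of one between SentencePiece ids and fairseq ids, with the four specials handled by a lookup. A hedged sketch of that id arithmetic, mirroring the token-to-id method above (`spm_piece_to_id` stands in for the SentencePiece model's PieceToId call):

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1

def convert_token_to_id(token: str, spm_piece_to_id) -> int:
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id(token)
    # PieceToId returns 0 for unknown pieces, so fall back to <unk> in that case.
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]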
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available _lowercase = {'''tokenization_herbert''': ['''HerbertTokenizer''']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = ['''HerbertTokenizerFast'''] if TYPE_CHECKING: from .tokenization_herbert import HerbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_herbert_fast import HerbertTokenizerFast else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowercase = { '''configuration_mctct''': ['''MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MCTCTConfig'''], '''feature_extraction_mctct''': ['''MCTCTFeatureExtractor'''], '''processing_mctct''': ['''MCTCTProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MCTCTForCTC''', '''MCTCTModel''', '''MCTCTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations import math def _snake_case ( snake_case__ : list , snake_case__ : list ): if len(snake_case__ ) != 2 or len(a[0] ) != 2 or len(snake_case__ ) != 2 or len(b[0] ) != 2: raise Exception('Matrices are not 2x2' ) A = [ [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]], [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]], ] return new_matrix def _snake_case ( snake_case__ : list , snake_case__ : list ): return [ [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(snake_case__ ) ) ] def _snake_case ( snake_case__ : list , snake_case__ : list ): return [ [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )] for row in range(len(snake_case__ ) ) ] def _snake_case ( snake_case__ : list ): if len(snake_case__ ) % 2 != 0 or len(a[0] ) % 2 != 0: raise Exception('Odd matrices are not supported!' ) A = len(snake_case__ ) A = matrix_length // 2 A = [[a[i][j] for j in range(snake_case__ , snake_case__ )] for i in range(snake_case__ )] A = [ [a[i][j] for j in range(snake_case__ , snake_case__ )] for i in range(snake_case__ , snake_case__ ) ] A = [[a[i][j] for j in range(snake_case__ )] for i in range(snake_case__ )] A = [[a[i][j] for j in range(snake_case__ )] for i in range(snake_case__ , snake_case__ )] return top_left, top_right, bot_left, bot_right def _snake_case ( snake_case__ : list ): return len(snake_case__ ), len(matrix[0] ) def _snake_case ( snake_case__ : list ): print('\n'.join(str(snake_case__ ) for line in matrix ) ) def _snake_case ( snake_case__ : list , snake_case__ : list ): if matrix_dimensions(snake_case__ ) == (2, 2): return default_matrix_multiplication(snake_case__ , snake_case__ ) A , A , A , A = split_matrix(snake_case__ ) A , A , A , A = split_matrix(snake_case__ ) A = actual_strassen(snake_case__ , matrix_subtraction(snake_case__ , snake_case__ ) ) A = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) A = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) A = actual_strassen(snake_case__ , matrix_subtraction(snake_case__ , snake_case__ ) ) A = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) ) A = actual_strassen(matrix_subtraction(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) ) A = actual_strassen(matrix_subtraction(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) ) A = matrix_addition(matrix_subtraction(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) , snake_case__ ) A = matrix_addition(snake_case__ , snake_case__ ) A = matrix_addition(snake_case__ , snake_case__ ) A = matrix_subtraction(matrix_subtraction(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) , snake_case__ ) # construct the new matrix from our 4 quadrants A = [] for i in range(len(snake_case__ ) ): new_matrix.append(top_left[i] + top_right[i] ) for i in range(len(snake_case__ ) ): new_matrix.append(bot_left[i] + bot_right[i] ) return new_matrix def _snake_case ( snake_case__ : list , snake_case__ : list ): if matrix_dimensions(snake_case__ )[1] != matrix_dimensions(snake_case__ )[0]: A = ( 'Unable to multiply these matrices, please check the dimensions.\n' F'Matrix A: {matrixa}\n' F'Matrix B: {matrixa}' ) raise Exception(snake_case__ ) A = matrix_dimensions(snake_case__ ) A = matrix_dimensions(snake_case__ ) if 
dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]: return [matrixa, matrixa] A = max(*snake_case__ , *snake_case__ ) A = int(math.pow(2 , math.ceil(math.loga(snake_case__ ) ) ) ) A = matrixa A = matrixa # Adding zeros to the matrices so that the arrays dimensions are the same and also # power of 2 for i in range(0 , snake_case__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , snake_case__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) if i < dimensiona[0]: for _ in range(dimensiona[1] , snake_case__ ): new_matrixa[i].append(0 ) else: new_matrixa.append([0] * maxim ) A = actual_strassen(snake_case__ , snake_case__ ) # Removing the additional zeros for i in range(0 , snake_case__ ): if i < dimensiona[0]: for _ in range(dimensiona[1] , snake_case__ ): final_matrix[i].pop() else: final_matrix.pop() return final_matrix if __name__ == "__main__": _lowercase = [ [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 4, 3, 1], [2, 3, 6, 7], [3, 1, 2, 4], [2, 3, 4, 5], [6, 2, 3, 1], ] _lowercase = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]] print(strassen(matrixa, matrixa))
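For illustration, a hypothetical helper mirroring the `math.pow(2, math.ceil(...))` padding step in strassen() above, which rounds dimensions up to the next power of two so the recursive splits always land on even sizes:

import math

def next_power_of_two(n: int) -> int:
    # Same arithmetic as the padding step in strassen() above.
    return int(math.pow(2, math.ceil(math.log2(n))))

assert next_power_of_two(5) == 8   # a 5-wide matrix is padded out to 8
assert next_power_of_two(8) == 8   # powers of two are left unchanged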
"""simple docstring""" from torch import nn def _snake_case ( snake_case__ : Union[str, Any] ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'Unsupported activation function: {act_fn}' )
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow if is_torch_available(): import torch from transformers import XLMRobertaModel @require_sentencepiece @require_tokenizers @require_torch class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: A = XLMRobertaModel.from_pretrained('xlm-roberta-base' ) A = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house A = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim A = torch.tensor( [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A = model(A_ )['last_hidden_state'].detach() self.assertEqual(output.shape ,A_ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,A_ ,atol=1e-3 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: A = XLMRobertaModel.from_pretrained('xlm-roberta-large' ) A = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] ) # The dog is cute and lives in the garden house A = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim A = torch.tensor( [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] ) # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large') # xlmr.eval() # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1] with torch.no_grad(): A = model(A_ )['last_hidden_state'].detach() self.assertEqual(output.shape ,A_ ) # compare the actual values for a slice of last dim self.assertTrue(torch.allclose(output[:, :, -1] ,A_ ,atol=1e-3 ) )
"""simple docstring""" import copy import re class lowerCAmelCase_ : '''simple docstring''' _lowerCamelCase: str = '''hp''' _lowerCamelCase: List[Any] = {} _lowerCamelCase: List[Any] = None @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple: A = prefix A = defaults cls.build_naming_info() @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int: if len(A_ ) == 0: return "" A = None if any(char.isdigit() for char in word ): raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' ) if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1 ,len(A_ ) + 1 ): A = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: A = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(A_ : Optional[Any] ): A = '' while integer != 0: A = chr(ord('A' ) + integer % 10 ) + s integer //= 10 return s A = 0 while True: A = word + '#' + int_to_alphabetic(A_ ) if sword in info["reverse_short_word"]: continue else: A = sword break A = short_word A = word return short_word @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: A = param_name.split('_' ) A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name A = ['', '_'] for separator in separators: A = separator.join(A_ ) if shortname not in info["reverse_short_param"]: A = shortname A = param_name return shortname return param_name @staticmethod def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple: A = TrialShortNamer.shortname_for_key(A_ ,A_ ) A = short_name A = param_name @classmethod def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]: if cls.NAMING_INFO is not None: return A = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } A = list(cls.DEFAULTS.keys() ) for k in field_keys: cls.add_new_param_name(A_ ,A_ ) A = info @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]: cls.build_naming_info() assert cls.PREFIX is not None A = [copy.copy(cls.PREFIX )] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(F'You should provide a default value for the param name {k} with value {v}' ) if v == cls.DEFAULTS[k]: # The default value is not added to the name continue A = cls.NAMING_INFO['short_param'][k] if isinstance(A_ ,A_ ): A = 1 if v else 0 A = '' if isinstance(A_ ,(int, float) ) else '-' A = F'{key}{sep}{v}' name.append(A_ ) return "_".join(A_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int: A = repr[len(cls.PREFIX ) + 1 :] if repr == "": A = [] else: A = repr.split('_' ) A = {} for value in values: if "-" in value: A , A = value.split('-' ) else: A = re.sub('[0-9.]' ,'' ,A_ ) A = float(re.sub('[^0-9.]' ,'' ,A_ ) ) A = cls.NAMING_INFO['reverse_short_param'][p_k] A = p_v for k in cls.DEFAULTS: if k not in parameters: A = cls.DEFAULTS[k] return parameters
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def _snake_case ( ): A = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert('RGB' ) return image def _snake_case ( snake_case__ : Optional[int] ): A = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) ) rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') ) # fmt: on return rename_keys def _snake_case ( snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Optional[Any] ): A = dct.pop(snake_case__ ) A = val def _snake_case ( snake_case__ : List[str] , 
snake_case__ : Optional[Any] ): for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases A = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' ) A = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' ) # next, set bias in the state dict A = torch.cat((q_bias, torch.zeros_like(snake_case__ , requires_grad=snake_case__ ), v_bias) ) A = qkv_bias def _snake_case ( snake_case__ : Tuple ): A = 364 if 'coco' in model_name else 224 A = InstructBlipVisionConfig(image_size=snake_case__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: A = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: A = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: A = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_2001 ).to_dict() elif "vicuna-13b" in model_name: A = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_2001 ).to_dict() else: raise ValueError('Model name not supported' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 A = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict() A = InstructBlipConfig(vision_config=snake_case__ , text_config=snake_case__ , qformer_config=snake_case__ ) return config, image_size @torch.no_grad() def _snake_case ( snake_case__ : Tuple , snake_case__ : List[Any]=None , snake_case__ : List[Any]=False ): A = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' ) qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} ) if "t5" in model_name: A = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) A = LlamaTokenizerFast.from_pretrained( 'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' ) tokenizer.add_special_tokens({'pad_token': '[PAD]'} ) A , A = get_blipa_config(snake_case__ ) A = InstructBlipForConditionalGeneration(snake_case__ ).eval() A = { 'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'), } A , A = model_name_to_original[model_name] # load original model print('Loading original model...' ) A = 'cuda:1' if torch.cuda.is_available() else 'cpu' A = 'cuda:2' if torch.cuda.is_available() else 'cpu' A , A , A = load_model_and_preprocess( name=snake_case__ , model_type=snake_case__ , is_eval=snake_case__ , device=snake_case__ ) original_model.eval() print('Done!' 
) # update state dict keys A = original_model.state_dict() A = create_rename_keys(snake_case__ ) for src, dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): A = state_dict.pop(snake_case__ ) if key.startswith('Qformer.bert' ): A = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: A = key.replace('self' , 'attention' ) if "llm_proj" in key: A = key.replace('llm_proj' , 'language_projection' ) if "t5_proj" in key: A = key.replace('t5_proj' , 'language_projection' ) if key.startswith('llm_model' ): A = key.replace('llm_model' , 'language_model' ) if key.startswith('t5' ): A = key.replace('t5' , 'language' ) A = val # read in qv biases read_in_q_v_bias(snake_case__ , snake_case__ ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(snake_case__ , strict=snake_case__ ) A = load_demo_image() A = 'What is unusual about this image?' # create processor A = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=snake_case__ , image_std=snake_case__ ) A = InstructBlipProcessor( image_processor=snake_case__ , tokenizer=snake_case__ , qformer_tokenizer=snake_case__ , ) A = processor(images=snake_case__ , text=snake_case__ , return_tensors='pt' ).to(snake_case__ ) # make sure processor creates exact same pixel values A = vis_processors['eval'](snake_case__ ).unsqueeze(0 ).to(snake_case__ ) A = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , snake_case__ ) original_model.to(snake_case__ ) hf_model.to(snake_case__ ) with torch.no_grad(): if "vicuna" in model_name: A = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits A = hf_model(**snake_case__ ).logits else: A = original_model( {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits A = tokenizer('\n' , return_tensors='pt' ).input_ids.to(snake_case__ ) A = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) A = hf_model(**snake_case__ , labels=snake_case__ ).logits print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape A = 1e-4 if 'vicuna' in model_name else 1e-5 assert torch.allclose(original_logits.to(logits.device ) , snake_case__ , atol=snake_case__ ) print('Looks ok!' ) print('Generating with original model...' ) A = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('Generating with HF model...' ) A = hf_model.generate( **snake_case__ , do_sample=snake_case__ , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
A = 2 print('Original generation:' , snake_case__ ) A = processor.batch_decode(snake_case__ , skip_special_tokens=snake_case__ ) A = [text.strip() for text in output_text] print('HF generation:' , snake_case__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(snake_case__ ) hf_model.save_pretrained(snake_case__ ) if push_to_hub: processor.push_to_hub(F'Salesforce/{model_name}' ) hf_model.push_to_hub(F'Salesforce/{model_name}' ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() _lowercase = [ '''instructblip-vicuna-7b''', '''instructblip-vicuna-13b''', '''instructblip-flan-t5-xl''', '''instructblip-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''instructblip-flan-t5-xl''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) _lowercase = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(snake_case__ ): requests.request('GET' , 'https://huggingface.co' ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request('GET' , 'https://huggingface.co' , timeout=1.0 ) @pytest.mark.integration def _snake_case ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request('GET' , 'https://huggingface.co' ) def _snake_case ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(snake_case__ ): http_head('https://huggingface.co' )
"""simple docstring""" import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class lowerCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: A = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: A = [ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertTrue(is_safetensors_compatible(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: A = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: A = [ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] self.assertTrue(is_safetensors_compatible(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: A = [ 'safety_checker/pytorch_model.bin', 'safety_checker/model.safetensors', 'vae/diffusion_pytorch_model.bin', 'vae/diffusion_pytorch_model.safetensors', 'text_encoder/pytorch_model.bin', # Removed: 'text_encoder/model.safetensors', 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: A = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] A = 'fp16' self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = [ 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] A = 'fp16' self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> str: # pass variant but use the non-variant filenames A = [ 'unet/diffusion_pytorch_model.bin', 'unet/diffusion_pytorch_model.safetensors', ] A = 'fp16' self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: A = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors', ] A = 'fp16' self.assertFalse(is_safetensors_compatible(A_ ,variant=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: A = [ 'text_encoder/pytorch_model.fp16.bin', 'text_encoder/model.fp16.safetensors', ] A = 'fp16' self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: # pass variant but use 
the non-variant filenames A = [ 'text_encoder/pytorch_model.bin', 'text_encoder/model.safetensors', ] A = 'fp16' self.assertTrue(is_safetensors_compatible(A_ ,variant=A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: A = [ 'safety_checker/pytorch_model.fp16.bin', 'safety_checker/model.fp16.safetensors', 'vae/diffusion_pytorch_model.fp16.bin', 'vae/diffusion_pytorch_model.fp16.safetensors', 'text_encoder/pytorch_model.fp16.bin', # 'text_encoder/model.fp16.safetensors', 'unet/diffusion_pytorch_model.fp16.bin', 'unet/diffusion_pytorch_model.fp16.safetensors', ] A = 'fp16' self.assertFalse(is_safetensors_compatible(A_ ,variant=A_ ) )
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase_ ( _lowercase , unittest.TestCase ): '''simple docstring''' _lowerCamelCase: List[str] = BioGptTokenizer _lowerCamelCase: Tuple = False def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] A = dict(zip(A_ ,range(len(A_ ) ) ) ) A = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(A_ ) ) def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ) -> int: A = 'lower newer' A = 'lower newer' return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: A = BioGptTokenizer(self.vocab_file ,self.merges_file ) A = 'lower' A = ['low', 'er</w>'] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ ,A_ ) A = tokens + ['<unk>'] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,A_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: A = BioGptTokenizer.from_pretrained('microsoft/biogpt' ) A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ ) A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ) A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
"""simple docstring""" import argparse from copy import deepcopy import numpy as np from datasets import ClassLabel, DatasetDict, load_dataset from evaluate import load from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, Trainer, TrainerCallback, TrainingArguments, set_seed, ) def _snake_case ( ): A = argparse.ArgumentParser() parser.add_argument('--model_ckpt' , type=snake_case__ , default='microsoft/unixcoder-base-nine' ) parser.add_argument('--num_epochs' , type=snake_case__ , default=5 ) parser.add_argument('--batch_size' , type=snake_case__ , default=6 ) parser.add_argument('--gradient_accumulation_steps' , type=snake_case__ , default=1 ) parser.add_argument('--freeze' , type=snake_case__ , default=snake_case__ ) parser.add_argument('--learning_rate' , type=snake_case__ , default=5e-4 ) parser.add_argument('--seed' , type=snake_case__ , default=0 ) parser.add_argument('--lr_scheduler_type' , type=snake_case__ , default='cosine' ) parser.add_argument('--num_warmup_steps' , type=snake_case__ , default=10 ) parser.add_argument('--weight_decay' , type=snake_case__ , default=0.01 ) parser.add_argument('--output_dir' , type=snake_case__ , default='./results' ) return parser.parse_args() _lowercase = load('''accuracy''') def _snake_case ( snake_case__ : Optional[int] ): A , A = eval_pred A = np.argmax(snake_case__ , axis=1 ) return metric.compute(predictions=snake_case__ , references=snake_case__ ) class lowerCAmelCase_ ( _lowercase ): '''simple docstring''' def __init__( self : str ,A_ : Dict ) -> None: super().__init__() A = trainer def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ,**A_ : Union[str, Any] ) -> int: if control.should_evaluate: A = deepcopy(A_ ) self._trainer.evaluate(eval_dataset=self._trainer.train_dataset ,metric_key_prefix='train' ) return control_copy def _snake_case ( ): A = get_args() set_seed(args.seed ) A = load_dataset('codeparrot/codecomplex' , split='train' ) A = dataset.train_test_split(test_size=0.2 ) A = train_test['test'].train_test_split(test_size=0.5 ) A = DatasetDict( { 'train': train_test['train'], 'test': test_validation['train'], 'valid': test_validation['test'], } ) print('Loading tokenizer and model' ) A = AutoTokenizer.from_pretrained(args.model_ckpt ) A = tokenizer.eos_token A = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 ) A = model.config.eos_token_id if args.freeze: for param in model.roberta.parameters(): A = False A = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) ) def tokenize(snake_case__ : Any ): A = tokenizer(example['src'] , truncation=snake_case__ , max_length=1024 ) A = labels.straint(example['complexity'] ) return { "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], "label": label, } A = train_test_validation.map( snake_case__ , batched=snake_case__ , remove_columns=train_test_validation['train'].column_names , ) A = DataCollatorWithPadding(tokenizer=snake_case__ ) A = TrainingArguments( output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , 
report_to='wandb' , ) A = Trainer( model=snake_case__ , args=snake_case__ , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=snake_case__ , data_collator=snake_case__ , compute_metrics=snake_case__ , ) print('Training...' ) trainer.add_callback(CustomCallback(snake_case__ ) ) trainer.train() if __name__ == "__main__": main()
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers _lowercase = float('''nan''') class lowerCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ,A_ : Tuple ) -> Any: A = sys.stdout A = open(A_ ,'a' ) def __getattr__( self : int ,A_ : Optional[Any] ) -> Tuple: return getattr(self.stdout ,A_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> str: self.stdout.write(A_ ) # strip tqdm codes self.file.write(re.sub(R'^.*\r' ,'' ,A_ ,0 ,re.M ) ) def _snake_case ( snake_case__ : Optional[Any]=80 , snake_case__ : List[str]=False ): A = [] # deal with critical env vars A = ['CUDA_VISIBLE_DEVICES'] for key in env_keys: A = os.environ.get(snake_case__ , snake_case__ ) if val is not None: cmd.append(F'{key}={val}' ) # python executable (not always needed if the script is executable) A = sys.executable if full_python_path else sys.executable.split('/' )[-1] cmd.append(snake_case__ ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes A = [] A = '' while len(snake_case__ ) > 0: current_line += F'{cmd.pop(0 )} ' if len(snake_case__ ) == 0 or len(snake_case__ ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(snake_case__ ) A = '' return "\\\n".join(snake_case__ ) def _snake_case ( snake_case__ : str , snake_case__ : str ): # unwrap multi-line input A = re.sub(r'[\\\n]+' , ' ' , args.base_cmd ) # remove --output_dir if any and set our own A = re.sub('--output_dir\s+[^\s]+' , '' , args.base_cmd ) args.base_cmd += F' --output_dir {output_dir}' # ensure we have --overwrite_output_dir A = re.sub('--overwrite_output_dir\s+' , '' , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any] ): # Enable to debug everything but the run itself, to do it fast and see the progress. 
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0 )
        return dict(
            {k: random.uniform(0 , 100 ) for k in metric_keys} ,
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222] )} ,
        )

    A = subprocess.run(snake_case__ , capture_output=snake_case__ , text=snake_case__ )

    if verbose:
        print('STDOUT' , result.stdout )
        print('STDERR' , result.stderr )

    # save the streams
    A = variation.replace(' ' , '-' )
    with open(Path(snake_case__ ) / F'log.{prefix}.stdout.txt' , 'w' ) as f:
        f.write(result.stdout )
    with open(Path(snake_case__ ) / F'log.{prefix}.stderr.txt' , 'w' ) as f:
        f.write(result.stderr )

    if result.returncode != 0:
        if verbose:
            print('failed' )
        return {target_metric_key: nan}

    with io.open(F'{output_dir}/all_results.json' , 'r' , encoding='utf-8' ) as f:
        A = json.load(snake_case__ )

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}


def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Tuple , snake_case__ : Dict , snake_case__ : Optional[Any] , ):
    A = []
    A = []
    A = F'{id}: {variation:<{longest_variation_len}}'
    A = F'{preamble}: '
    A = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(snake_case__ ) , desc=snake_case__ , leave=snake_case__ ):
        A = process_run_single(
            snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
        A = single_run_metrics[target_metric_key]
        if not math.isnan(snake_case__ ):
            metrics.append(snake_case__ )
            results.append(snake_case__ )
            outcome += "✓"
        else:
            outcome += "✘"
    A = F'\33[2K\r{outcome}'
    if len(snake_case__ ) > 0:
        A = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        A = round(mean_metrics[target_metric_key] , 2 )
        A = F'{outcome} {mean_target}'
        if len(snake_case__ ) > 1:
            results_str += F' {tuple(round(snake_case__ , 2 ) for x in results )}'
        print(snake_case__ )
        A = variation
        return mean_metrics
    else:
        print(snake_case__ )
        return {variation_key: variation, target_metric_key: nan}


def _snake_case ( ):
    A = torch.cuda.get_device_properties(torch.device('cuda' ) )
    return F'\nDatetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n'


def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : Union[str, Any] ):
    A = pd.DataFrame(snake_case__ )
    A = 'variation'
    A = 'diff_%'

    A = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        A = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(snake_case__ ):
        # as a fallback, use the minimal value as the sentinel
        A = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(snake_case__ ):
        A = df.apply(
            lambda snake_case__ : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 ,
            axis='columns' ,
        )

    # re-order columns
    A = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    A = df.reindex(snake_case__ , axis='columns' )  # reorder cols

    # capitalize
    A = df.rename(str.capitalize , axis='columns' )

    # make the cols as narrow as possible
    A = df.rename(lambda snake_case__ : c.replace('_' , '<br>' ) , axis='columns' )
    A = df.rename(lambda snake_case__ : c.replace('_' , '\n' ) , axis='columns' )

    A = ['', 'Copy between the cut-here-lines and paste as is to github or a forum']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=snake_case__ , floatfmt='.2f' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=snake_case__ , floatfmt='.2f' )]

    print('\n\n'.join(snake_case__ ) )


def _snake_case ( ):
    A = argparse.ArgumentParser()
    parser.add_argument(
        '--base-cmd' ,
        default=snake_case__ ,
        type=snake_case__ ,
        required=snake_case__ ,
        help='Base cmd' ,
    )
    parser.add_argument(
        '--variations' ,
        default=snake_case__ ,
        type=snake_case__ ,
        nargs='+' ,
        required=snake_case__ ,
        help='Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'' ,
    )
    parser.add_argument(
        '--base-variation' ,
        default=snake_case__ ,
        type=snake_case__ ,
        help='Baseline variation to compare to. if None the minimal target value will be used to compare against' ,
    )
    parser.add_argument(
        '--target-metric-key' ,
        default=snake_case__ ,
        type=snake_case__ ,
        required=snake_case__ ,
        help='Target metric key in output_dir/all_results.json, e.g., train_samples_per_second' ,
    )
    parser.add_argument(
        '--report-metric-keys' ,
        default='' ,
        type=snake_case__ ,
        help='Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'' ,
    )
    parser.add_argument(
        '--repeat-times' ,
        default=1 ,
        type=snake_case__ ,
        help='How many times to re-run each variation - an average will be reported' ,
    )
    parser.add_argument(
        '--output_dir' ,
        default='output_benchmark' ,
        type=snake_case__ ,
        help='The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked' ,
    )
    parser.add_argument(
        '--verbose' ,
        default=snake_case__ ,
        action='store_true' ,
        help='Whether to show the outputs of each run or just the benchmark progress' ,
    )
    A = parser.parse_args()

    A = args.output_dir
    Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
    A = get_base_command(snake_case__ , snake_case__ )

    # split each dimension into its --foo variations
    A = [list(map(str.strip , re.split(r'\|' , snake_case__ ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    A = list(map(str.strip , map(' '.join , itertools.product(*snake_case__ ) ) ) )
    A = max(len(snake_case__ ) for x in variations )

    # split wanted keys
    A = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    A = F'benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'
    print(F'\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt' )
    print(F'and this script\'s output is also piped into {report_fn}' )

    A = Tee(snake_case__ )

    print(F'\n*** Running {len(snake_case__ )} benchmarks:' )
    print(F'Base command: {" ".join(snake_case__ )}' )

    A = 'variation'
    A = []
    for id, variation in enumerate(tqdm(snake_case__ , desc='Total completion: ' , leave=snake_case__ ) ):
        A = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 ,
                snake_case__ ,
                snake_case__ ,
                snake_case__ ,
                snake_case__ ,
                args.target_metric_key ,
                snake_case__ ,
                args.repeat_times ,
                snake_case__ ,
                args.verbose ,
            ) )

    process_results(snake_case__ , args.target_metric_key , snake_case__ , args.base_variation , snake_case__ )


if __name__ == "__main__":
    main()
91
1
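A minimal, self-contained sketch (not part of the tool above) of how the `--variations` dimensions expand into a cartesian product of command-line suffixes, mirroring the splitting and joining done in the benchmark script's `main()`:

import itertools
import re

variations_args = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]

# split each dimension on '|' into its individual options
dims = [list(map(str.strip, re.split(r"\|", x))) for x in variations_args]

# cartesian product across dimensions, joined back into arg strings;
# the empty option in a dimension yields a bare variation like '--tf32 0'
variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
print(variations)
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']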
"""simple docstring""" from maths.prime_check import is_prime def _snake_case ( snake_case__ : int ): if not isinstance(snake_case__ , snake_case__ ): A = F'Input value of [number={number}] must be an integer' raise TypeError(snake_case__ ) if is_prime(snake_case__ ) and is_prime(number + 2 ): return number + 2 else: return -1 if __name__ == "__main__": import doctest doctest.testmod()
91
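For illustration, a self-contained usage sketch of the twin-prime check above. `is_prime` here is a simple trial-division stand-in for the repo-local `maths.prime_check.is_prime`, and the name `twin_prime` is assumed for the obfuscated function:

def is_prime(n: int) -> bool:
    # trial division; sufficient for a small demo
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True


def twin_prime(number: int) -> int:
    # returns number + 2 if (number, number + 2) is a twin prime pair, else -1
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    return number + 2 if is_prime(number) and is_prime(number + 2) else -1


assert twin_prime(3) == 5   # (3, 5) are twin primes
assert twin_prime(5) == 7   # (5, 7) are twin primes
assert twin_prime(8) == -1  # 8 is not prime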
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('''>=''', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowercase = get_logger(__name__) def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving model to {ckpt_dir}' ) A = {'model': state_dict} dist_cp.save_state_dict( state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( 'Set the `sync_module_states` flag to `True` so that model states are synced across processes when ' 'initializing FSDP object' ) return A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: A = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading model from {input_model_file}' ) A = torch.load(snake_case__ ) logger.info(F'Model loaded from 
{input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: A = ( os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) A = {'model': model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , ) A = state_dict['model'] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case__ ) def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ): os.makedirs(snake_case__ , exist_ok=snake_case__ ) with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): A = FSDP.optim_state_dict(snake_case__ , snake_case__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case__ , snake_case__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case__ , exist_ok=snake_case__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: A = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: A = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) A = os.path.join(snake_case__ , snake_case__ ) logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) A = torch.load(snake_case__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: A = ( os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) A = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , ) A = optim_state['optimizer'] logger.info(F'Optimizer loaded from {ckpt_dir}' ) A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ ) optimizer.load_state_dict(snake_case__ )
91
1
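As a closing illustration, a minimal sketch of the FSDP full-state-dict save pattern the utilities above rely on, written against plain PyTorch rather than Accelerate's plugin machinery (assumes torch >= 1.12 with an initialized process group and an already FSDP-wrapped `model`; `save_full_model` is a hypothetical helper name):

import torch
import torch.distributed as dist
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import FullStateDictConfig, StateDictType


def save_full_model(model: FSDP, path: str) -> None:
    # gather the full (unsharded) state dict, offloaded to CPU, on rank 0 only
    cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
    with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, cfg):
        state_dict = model.state_dict()
    # only rank 0 holds the gathered weights, so only it writes the file
    if dist.get_rank() == 0:
        torch.save(state_dict, path)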