Dataset schema (one record = five fields, shown in this order below):

  column                   dtype    range
  code                     string   length 86 to 54.5k
  code_codestyle           int64    0 to 371
  style_context            string   length 87 to 49.2k
  style_context_codestyle  int64    0 to 349
  label                    int64    0 to 1
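Each record below is printed field by field in schema order. A minimal sketch for loading and inspecting records of this shape with the Hugging Face datasets library; the repository id "user/code-style-pairs" is a placeholder for illustration, not the dataset's actual name:

from datasets import load_dataset

# "user/code-style-pairs" is a placeholder id -- substitute the real repository name.
dataset = load_dataset("user/code-style-pairs", split="train")

for record in dataset.select(range(3)):
    # Each record pairs a code snippet (style id: code_codestyle) with a
    # style context (style id: style_context_codestyle) and a binary label.
    print(record["code_codestyle"], record["style_context_codestyle"], record["label"])
    print(record["code"][:80])  # first 80 characters of the snippet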
code:

import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict

_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
    ],
    defaults=[None, None, None, False, False, False, False, False],
)


def is_apercent_close(source, target):
    """Check that `source` is within 1% of `target`."""
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_dir):
    # `dataset_dir` is a pytest fixture pointing at a local dataset script directory.
    args = _TestCommandArgs(dataset=dataset_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    readme_path = os.path.join(dataset_dir, "README.md")
    assert os.path.exists(readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {"name": "train", "num_bytes": 2351563, "num_examples": 10000},
                    {"name": "validation", "num_bytes": 238418, "num_examples": 1000},
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
code_codestyle: 355
style_context:

# Functions to print the upper and lower halves of a diamond (pyramid) of stars.


def floyd(n):
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    """Print the full diamond, or a placeholder message for non-positive input."""
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |-  |_  |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
style_context_codestyle: 311
label: 0
code:

from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])

        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
code_codestyle: 356
style_context:

from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
style_context_codestyle: 311
label: 0
code:

from __future__ import annotations


def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
code_codestyle: 357
style_context:

import unittest

from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder


class BertGenerationEncoderTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=50,
        initializer_range=0.02,
        use_labels=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        input_mask,
        token_labels,
        encoder_hidden_states,
        encoder_attention_mask,
        **kwargs,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)


@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))


@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
style_context_codestyle: 311
label: 0
code:

import os
import sys

import transformers

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce TensorFlow's console output

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
code_codestyle: 358
style_context:

import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline


if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
style_context_codestyle: 311
label: 0
code:

import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile

from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    PROCESSOR_MAPPING,
    TOKENIZER_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    BertTokenizer,
    ProcessorMixin,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402
from test_module.custom_processing import CustomProcessor  # noqa E402
from test_module.custom_tokenization import CustomTokenizer  # noqa E402


SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")


class AutoFeatureExtractorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_processor_from_model_shortcut(self):
        processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_repo(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            processor.save_pretrained(tmpdirname)

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_local_directory_from_extractor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(SAMPLE_PROCESSOR_CONFIG, os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME))
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_tokenizer_config_without_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in tokenizer
            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, TOKENIZER_CONFIG_FILE), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_feat_extr_config_without_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            feature_extractor = Wav2Vec2FeatureExtractor()
            tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h")

            processor = Wav2Vec2Processor(feature_extractor, tokenizer)

            # save in new folder
            processor.save_pretrained(tmpdirname)

            # drop `processor_class` in feature extractor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "r") as f:
                config_dict = json.load(f)
            config_dict.pop("processor_class")

            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write(json.dumps(config_dict))

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_processor_from_model_config_with_processor_class(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config(processor_class="Wav2Vec2Processor")
            model_config.save_pretrained(tmpdirname)
            # copy relevant files
            copyfile(SAMPLE_VOCAB, os.path.join(tmpdirname, "vocab.json"))
            # create empty sample processor
            with open(os.path.join(tmpdirname, FEATURE_EXTRACTOR_NAME), "w") as f:
                f.write("{}")

            processor = AutoProcessor.from_pretrained(tmpdirname)

        self.assertIsInstance(processor, Wav2Vec2Processor)

    def test_from_pretrained_dynamic_processor(self):
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )

        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")

        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_new_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)

            processor = CustomProcessor(feature_extractor, tokenizer)

            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_processor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_auto_processor_creates_tokenizer(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")

    def test_auto_processor_creates_image_processor(self):
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")


@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )

            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )

            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())

    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()

        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)

        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        processor = CustomProcessor(feature_extractor, tokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)

            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map,
                {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(tmp_dir, "tokenizer_config.json")) as f:
                tokenizer_config = json.load(f)
            self.assertDictEqual(
                tokenizer_config["auto_map"],
                {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                },
            )

            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_feature_extraction.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_tokenization.py")))
            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, "custom_processing.py")))

            repo.push_to_hub()

            new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__, "CustomProcessor")
code_codestyle: 359
style_context:

from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape an Amazon search results page for a product into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹" + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
style_context_codestyle: 311
label: 0
code:

from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of ax^2 + bx + c = 0 via the quadratic formula."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
code_codestyle: 360
style_context:

import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """Iterate z -> z^2 + c for the point (x, y) and return the normalized escape step."""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """Black for points inside the Mandelbrot set, white outside."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """Black inside the set; outside, the hue encodes the escape distance."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
style_context_codestyle: 311
label: 0
code:

from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
code_codestyle: 361
style_context:

import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    """Check that each custom kernel/extension file exists under `transformers_path`."""
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"
    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
style_context_codestyle: 311
label: 0
code:

from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 362
style_context:

from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDeiTForImageClassification,
        TFDeiTForImageClassificationWithTeacher,
        TFDeiTForMaskedImageModeling,
        TFDeiTModel,
    )
    from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])

        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
311
0
'''simple docstring''' import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' with open(lowercase__ ) as metadata_file: A : Tuple = json.load(lowercase__ ) A : Tuple = LukeConfig(use_entity_aware_attention=lowercase__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path A : Union[str, Any] = torch.load(lowercase__ , map_location='''cpu''' ) # Load the entity vocab file A : Optional[int] = load_entity_vocab(lowercase__ ) A : int = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks A : List[Any] = AddedToken('''<ent>''' , lstrip=lowercase__ , rstrip=lowercase__ ) A : str = AddedToken('''<ent2>''' , lstrip=lowercase__ , rstrip=lowercase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(lowercase__ ) with open(os.path.join(lowercase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(lowercase__ , lowercase__ ) A : Optional[int] = LukeTokenizer.from_pretrained(lowercase__ ) # Initialize the embeddings of the special tokens A : str = state_dict['''embeddings.word_embeddings.weight'''] A : Tuple = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 ) A : str = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 ) A : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: A : Optional[Any] = F'encoder.layer.{layer_index}.attention.self.' A : str = state_dict[prefix + matrix_name] A : Optional[int] = state_dict[prefix + matrix_name] A : Union[str, Any] = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks A : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight'''] A : List[str] = entity_emb[entity_vocab['''[MASK]''']] A : List[Any] = LukeModel(config=lowercase__ ).eval() A, A : Optional[int] = model.load_state_dict(lowercase__ , strict=lowercase__ ) if not (len(lowercase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'Missing keys {", ".join(lowercase__ )}. 
Expected only missing embeddings.position_ids' ) if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )): raise ValueError( '''Unexpected keys''' F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' ) # Check outputs A : Union[str, Any] = LukeTokenizer.from_pretrained(lowercase__ , task='''entity_classification''' ) A : Tuple = ( '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the''' ''' new world number one avoid a humiliating second- round exit at Wimbledon .''' ) A : Any = (39, 42) A : Dict = tokenizer(lowercase__ , entity_spans=[span] , add_prefix_space=lowercase__ , return_tensors='''pt''' ) A : Dict = model(**lowercase__ ) # Verify word hidden states if model_size == "large": A : Optional[int] = torch.Size((1, 42, 1024) ) A : Union[str, Any] = torch.tensor( [[0.01_33, 0.08_65, 0.00_95], [0.30_93, -0.25_76, -0.74_18], [-0.17_20, -0.21_17, -0.28_69]] ) else: # base A : Tuple = torch.Size((1, 42, 768) ) A : List[Any] = torch.tensor([[0.00_37, 0.13_68, -0.00_91], [0.10_99, 0.33_29, -0.10_95], [0.07_65, 0.53_35, 0.11_79]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": A : Any = torch.Size((1, 1, 1024) ) A : Optional[int] = torch.tensor([[0.04_66, -0.01_06, -0.01_79]] ) else: # base A : Union[str, Any] = torch.Size((1, 1, 768) ) A : Optional[Any] = torch.tensor([[0.14_57, 0.10_44, 0.01_74]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' F' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase__ , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(lowercase__ ) ) model.save_pretrained(lowercase__ ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Tuple = {} with open(lowercase__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(lowercase__ ): A, A : Tuple = line.rstrip().split('''\t''' ) A : int = index return entity_vocab if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' ) lowercase : int = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
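# --- Editor's note (illustrative usage; the script filename and all paths are
# placeholders, not from the original) ---
# The converter above is driven by argparse; a typical invocation looks like:
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path ./luke/pytorch_model.bin \
#       --metadata_path ./luke/metadata.json \
#       --entity_vocab_path ./luke/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./converted_luke \
#       --model_size base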
363
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps each submodule to the public names it exports, for lazy loading.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
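# --- Editor's note (illustrative sketch, not part of the original module) ---
# The _LazyModule registered above defers submodule imports until the first
# attribute access, so importing the package itself stays cheap:
from transformers.models import cpmant

config = cpmant.CpmAntConfig()          # first access: loads configuration_cpmant
tokenizer_cls = cpmant.CpmAntTokenizer  # loads tokenization_cpmant on demand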
311
0
'''simple docstring''' import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowercase : Tuple = 16 lowercase : str = 32 def lowerCAmelCase_ ( snake_case__ , snake_case__ = 16 ): '''simple docstring''' A : str = AutoTokenizer.from_pretrained('''bert-base-cased''' ) A : int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case__ ): # max_length=None => use the model max length (it's actually the default) A : Optional[int] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__snake_case , max_length=__snake_case ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A : Any = datasets.map( __snake_case , batched=__snake_case , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A : str = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case__ ): # On TPU it's best to pad everything to the same length or training will be very slow. A : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A : Optional[Any] = 16 elif accelerator.mixed_precision != "no": A : Optional[int] = 8 else: A : List[Any] = None return tokenizer.pad( __snake_case , padding='''longest''' , max_length=__snake_case , pad_to_multiple_of=__snake_case , return_tensors='''pt''' , ) # Instantiate dataloaders. 
A : Optional[Any] = DataLoader( tokenized_datasets['''train'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case , drop_last=__snake_case ) A : Optional[int] = DataLoader( tokenized_datasets['''validation'''] , shuffle=__snake_case , collate_fn=__snake_case , batch_size=__snake_case , drop_last=(accelerator.mixed_precision == '''fp8''') , ) return train_dataloader, eval_dataloader def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A : Any = config["lr"] A : Any = int(config['''num_epochs'''] ) A : str = int(config['''seed'''] ) A : List[Any] = int(config['''batch_size'''] ) A : List[Any] = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation A : str = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A : Any = batch_size // MAX_GPU_BATCH_SIZE A : Optional[Any] = MAX_GPU_BATCH_SIZE set_seed(__snake_case ) A : List[Any] = get_dataloaders(__snake_case , __snake_case ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A : int = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__snake_case ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A : Union[str, Any] = model.to(accelerator.device ) # Instantiate optimizer A : List[str] = AdamW(params=model.parameters() , lr=__snake_case ) # Instantiate scheduler A : Optional[int] = get_linear_schedule_with_warmup( optimizer=__snake_case , num_warmup_steps=100 , num_training_steps=(len(__snake_case ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A : Union[str, Any] = accelerator.prepare( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Now we train the model for epoch in range(__snake_case ): model.train() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A : List[str] = model(**__snake_case ) A : int = outputs.loss A : Dict = loss / gradient_accumulation_steps accelerator.backward(__snake_case ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): A : int = model(**__snake_case ) A : Union[str, Any] = outputs.logits.argmax(dim=-1 ) A : Dict = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=__snake_case , references=__snake_case , ) A : Union[str, Any] = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(F'epoch {epoch}:' , __snake_case ) def lowerCAmelCase_ ( ): '''simple docstring''' A : Any = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=__snake_case , default=__snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) A : Optional[int] = parser.parse_args() A : Tuple = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(__snake_case , __snake_case ) if __name__ == "__main__": main()
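# --- Editor's note (illustrative usage, not part of the original script; the
# script filename is a placeholder) ---
# The same file runs on CPU, a single GPU, multiple GPUs or a TPU; only the
# launcher changes:
#
#   accelerate config                                       # one-time machine setup
#   accelerate launch nlp_example.py --mixed_precision fp16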
364
"""Greedy best-first search on a 2D grid (0 = free cell, 1 = obstacle)."""
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance to the goal: the greedy evaluation function."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open nodes are sorted using __lt__, i.e. purely by heuristic cost
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent)
            )
        return successors

    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
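# --- Editor's note (illustrative sketch, not part of the original file) ---
# Greedy best-first ranks the frontier by the heuristic alone (f = h); A* would
# rank by f = g + h. The toggle below shows the one-line difference, assuming
# the Node class defined above:
def priority(node: Node, use_a_star: bool = False) -> float:
    h = node.calculate_heuristic()
    return node.g_cost + h if use_a_star else h  # greedy ignores the path cost g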
311
0
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE="last" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=0 , ) -> Dict: """simple docstring""" A : int = parent A : List[Any] = batch_size A : str = seq_length A : List[str] = is_training A : Any = use_input_lengths A : Union[str, Any] = use_token_type_ids A : Union[str, Any] = use_labels A : int = gelu_activation A : Optional[Any] = sinusoidal_embeddings A : List[str] = causal A : Union[str, Any] = asm A : Tuple = n_langs A : Union[str, Any] = vocab_size A : List[Any] = n_special A : str = hidden_size A : int = num_hidden_layers A : List[str] = num_attention_heads A : str = hidden_dropout_prob A : str = attention_probs_dropout_prob A : Union[str, Any] = max_position_embeddings A : Optional[int] = type_sequence_label_size A : Tuple = initializer_range A : List[Any] = num_labels A : Dict = num_choices A : List[Any] = summary_type A : Any = use_proj A : Any = scope A : List[Any] = bos_token_id def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A : Tuple = None if self.use_input_lengths: A : List[str] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length A : Optional[Any] = None if self.use_token_type_ids: A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) A : Optional[Any] = None A : Any = None A : Union[str, Any] = None if self.use_labels: A : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Optional[int] = ids_tensor([self.batch_size] , 2 ).float() A : List[str] = ids_tensor([self.batch_size] , self.num_choices ) A : List[str] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return XLMConfig( vocab_size=self.vocab_size , 
n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = XLMModel(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A : Any = model(lowerCamelCase_ , lengths=lowerCamelCase_ , langs=lowerCamelCase_ ) A : Dict = model(lowerCamelCase_ , langs=lowerCamelCase_ ) A : Union[str, Any] = model(lowerCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[int] = XLMWithLMHeadModel(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A : Any = model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Union[str, Any] = XLMForQuestionAnsweringSimple(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A : str = model(lowerCamelCase_ ) A : List[Any] = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ ) A : str = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" A : Optional[Any] = XLMForQuestionAnswering(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A : Tuple = model(lowerCamelCase_ ) A : Union[str, Any] = model( lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , p_mask=lowerCamelCase_ , ) A : Tuple = model( lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ , cls_index=lowerCamelCase_ , is_impossible=lowerCamelCase_ , ) (A ) : str = result_with_labels.to_tuple() A : List[str] = model(lowerCamelCase_ , start_positions=lowerCamelCase_ , end_positions=lowerCamelCase_ ) (A ) : Dict = 
result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : Dict = XLMForSequenceClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A : int = model(lowerCamelCase_ ) A : Tuple = model(lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" A : Dict = self.num_labels A : List[Any] = XLMForTokenClassification(lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A : Tuple = model(lowerCamelCase_ , attention_mask=lowerCamelCase_ , labels=lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : Any = self.num_choices A : Optional[Any] = XLMForMultipleChoice(config=lowerCamelCase_ ) model.to(lowerCamelCase_ ) model.eval() A : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() A : str = model( lowerCamelCase_ , attention_mask=lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : List[Any] = self.prepare_config_and_inputs() ( A ) : Dict = config_and_inputs A : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) __magic_name__ = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable __magic_name__ = ( { "feature-extraction": XLMModel, "fill-mask": 
XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Any: """simple docstring""" A : int = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": A : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ ) A : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ ) return inputs_dict def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = XLMModelTester(self ) A : int = ConfigTester(self , config_class=lowerCamelCase_ , emb_dim=37 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCamelCase_ ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase_ ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase_ ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCamelCase_ ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase_ ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase_ ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase_ ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=1 ) -> Optional[int]: """simple docstring""" self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual( [isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_attentions in attentions] , [True] * len(lowerCamelCase_ ) ) self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * 
num_beam_groups ) for idx, iter_attentions in enumerate(lowerCamelCase_ ): # adds PAD dummy token A : int = min_length + idx + 1 A : Tuple = min_length + idx + 1 A : str = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase_ ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=1 ) -> Dict: """simple docstring""" self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) self.assertListEqual( [isinstance(lowerCamelCase_ , lowerCamelCase_ ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase_ ) , ) self.assertEqual(len(lowerCamelCase_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(lowerCamelCase_ ): # adds PAD dummy token A : List[Any] = min_length + idx + 1 A : List[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase_ ) , ) pass @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : int = XLMModel.from_pretrained(lowerCamelCase_ ) self.assertIsNotNone(lowerCamelCase_ ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[str] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' ) model.to(lowerCamelCase_ ) A : str = torch.tensor([[14, 447]] , dtype=torch.long , device=lowerCamelCase_ ) # the president A : Dict = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference A : Tuple = model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCamelCase_ )
365
"""Utility that keeps the auto-generated model lists in the task guides in sync."""
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"


def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt`, with its line indices and all lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    "asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    "audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    "language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    "image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    "masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    "multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    "object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    "question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    "semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    "sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    "summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    "translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    "video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    "document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    "monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This dict contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    "summarization.md": ("nllb",),
    "translation.md": ("nllb",),
}


def get_model_list_for_task(task_guide):
    """Return the list of models supporting `task_guide`, formatted as doc links."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """Check (and optionally rewrite) the auto-generated model list of one task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
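# --- Usage recap (editor's addition, grounded in the header comment above) ---
# From the repository root:
#
#   python utils/check_task_guides.py                      # check only, raises on mismatch
#   python utils/check_task_guides.py --fix_and_overwrite  # rewrite the guides in place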
311
0
'''simple docstring''' import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights A : Tuple = FlaxDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_lowercase , cache_dir=_lowercase ) A : Optional[Any] = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , '''snapshots''' ) )] A : Optional[Any] = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith('''.bin''' ) for f in files ) @slow @require_flax class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A, A : Tuple = FlaxStableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=_lowercase ) A : Union[str, Any] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) A : Any = jax.random.PRNGKey(0 ) A : List[str] = 4 A : Optional[int] = jax.device_count() A : str = num_samples * [prompt] A : str = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A : Tuple = replicate(_lowercase ) A : int = jax.random.split(_lowercase , _lowercase ) A : Union[str, Any] = shard(_lowercase ) A : List[str] = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3 assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1 A : List[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) ) assert len(_lowercase ) == num_samples def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A, A : str = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=_lowercase ) A : Any = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) A : Optional[int] = jax.random.PRNGKey(0 ) A : Optional[int] = 50 A : Union[str, Any] = jax.device_count() A : List[str] = num_samples * [prompt] A : List[Any] = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A : List[str] = replicate(_lowercase ) A : Dict = jax.random.split(_lowercase , _lowercase ) A : str = shard(_lowercase ) A : Union[str, Any] = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3 assert np.abs((np.abs(_lowercase , 
dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A, A : List[Any] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_lowercase ) A : Optional[int] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) A : int = jax.random.PRNGKey(0 ) A : str = 50 A : Tuple = jax.device_count() A : List[Any] = num_samples * [prompt] A : Optional[Any] = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A : List[Any] = replicate(_lowercase ) A : Dict = jax.random.split(_lowercase , _lowercase ) A : int = shard(_lowercase ) A : Optional[Any] = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3 assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A, A : Dict = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa ) A : List[str] = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) A : Any = jax.random.PRNGKey(0 ) A : Tuple = 50 A : Tuple = jax.device_count() A : Union[str, Any] = num_samples * [prompt] A : Optional[int] = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A : Union[str, Any] = replicate(_lowercase ) A : Optional[int] = jax.random.split(_lowercase , _lowercase ) A : List[Any] = shard(_lowercase ) A : Any = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3 assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : int = FlaxDDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=_lowercase , steps_offset=1 , ) A, A : str = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , ) A : str = scheduler.create_state() A : Union[str, Any] = scheduler_state A : Any = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) A : Any = jax.random.PRNGKey(0 ) A : List[str] = 50 A : Any = jax.device_count() A : List[Any] = num_samples * [prompt] A : int = pipeline.prepare_inputs(_lowercase ) # shard inputs and rng A : Tuple = replicate(_lowercase ) A : Tuple = jax.random.split(_lowercase , _lowercase ) A : Optional[int] = shard(_lowercase ) A : int = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3 assert np.abs((np.abs(_lowercase , dtype=np.floataa 
).sum() - 2347693.5) ) < 5e-1 def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = ( '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of''' ''' field, close up, split lighting, cinematic''' ) A : Optional[Any] = jax.device_count() A : Dict = num_samples * [prompt] A : Optional[Any] = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase ) A, A : Any = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_lowercase , ) A : List[str] = replicate(_lowercase ) A : List[str] = pipeline.prepare_inputs(_lowercase ) A : List[Any] = shard(_lowercase ) A : str = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images.shape == (num_samples, 1, 512, 512, 3) A : Optional[int] = images[2, 0, 256, 10:17, 1] # With memory efficient attention A, A : List[Any] = FlaxStableDiffusionPipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , ) A : str = replicate(_lowercase ) A : Dict = pipeline.prepare_inputs(_lowercase ) A : Optional[Any] = shard(_lowercase ) A : int = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) A : int = images[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice ).max() < 1e-2
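# --- Editor's note (minimal sketch, not part of the original tests) ---
# The recurring prepare_inputs -> replicate -> split -> shard -> pipeline(...,
# jit=True) sequence above is the standard JAX data-parallel recipe: weights
# are replicated to every device, inputs and RNG keys are sharded per device.
# A stripped-down version of the same pattern with a plain pmapped function:
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"w": jnp.ones((4,))}                       # stand-in for pipeline params
inputs = jnp.ones((jax.device_count() * 2, 4))       # batch divisible by device count

replicated_params = replicate(params)                # copy params to every device
sharded_inputs = shard(inputs)                       # split batch across devices

per_device = jax.pmap(lambda p, x: x @ p["w"])       # runs once per device
out = per_device(replicated_params, sharded_inputs)  # shape: (devices, 2)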
366
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm (recursive version)."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
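# --- Illustrative example (editor's addition, assuming the `heaps` definition
# above) ---
# For a 3-element list Heap's algorithm emits all 3! = 6 permutations, each one
# swap away from its predecessor:
assert heaps([1, 2, 3]) == [
    (1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1),
]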
311
0
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity * n-factor = (moles / volume in litres) * nfactor."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for pressure: P = nRT / V, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
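# --- Illustrative example (editor's addition, assuming the helpers above) ---
# Sanity-checking PV = nRT: 3 mol at 300 K in 0.82 L gives
# P = (3 * 0.0821 * 300) / 0.82 ~ 90.1 atm, rounded to 90.
assert moles_to_pressure(volume=0.82, moles=3, temperature=300) == 90
# Normality = molarity * n-factor: (3.1 mol / 0.31 L) * 2 = 20 eq/L.
assert molarity_to_normality(nfactor=2, moles=3.1, volume=0.31) == 20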
367
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( __snake_case ): __magic_name__ = (UniPCMultistepScheduler,) __magic_name__ = (('''num_inference_steps''', 25),) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : str = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**SCREAMING_SNAKE_CASE ) return config def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : List[Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : int = 0.1 * sample A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] A, A : Tuple = sample, sample for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = dict(self.forward_default_kwargs ) A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : List[Any] = self.dummy_sample A : int = 0.1 * sample A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[int] = self.get_scheduler_config() A : Any = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) A : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE 
).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if scheduler is None: A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : int = 10 A : Tuple = self.dummy_model() A : Any = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = dict(self.forward_default_kwargs ) A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : Optional[int] = 0.1 * sample if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): A : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10] A : List[str] = dummy_past_residuals[: scheduler.config.solver_order] A : List[Any] = scheduler.timesteps[5] A : Dict = scheduler.timesteps[6] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config ) A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( 
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) A : Dict = self.full_loop( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = self.full_loop() A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[Any] = self.full_loop(prediction_type='''v_prediction''' ) A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.1_014 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = 10 A : Union[str, Any] = self.dummy_model() A : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
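# The config round-tripping exercised in the tests above is also how schedulers
# are swapped at inference time in diffusers. A minimal sketch (assumes an
# already-loaded pipeline object named `pipe`):
#
#     from diffusers import UniPCMultistepScheduler
#     pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)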
311
0
"""
One of the several implementations of the Lempel-Ziv-Welch decompression algorithm
https://en.wikipedia.org/wiki/Lempel%E2%80%93Ziv%E2%80%93Welch
"""
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
            return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string with LZW and return the decoded bit string."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        # mirror the encoder's dictionary growth: extend the matched entry with "0"
        lexicon[curr_string] = last_match_id + "0"

        # when the lexicon size crosses a power of two, every code grows by one
        # leading bit
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file, padding the final byte."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            # write every byte, including the final padded one
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Remove the size prefix that the matching compressor prepends to the data."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def decompress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it and write the result to destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    decompress(sys.argv[1], sys.argv[2])
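# A quick sanity check of the byte-to-bit-string conversion performed by
# read_file_binary (standalone, no file needed): 'A' is 65 and 'B' is 66.
#
#     data = b"AB"
#     bits = "".join(f"{byte:08b}" for byte in data)
#     assert bits == "0100000101000010"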
368
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with a DDIM scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # sample Gaussian noise to begin the denoising loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the DDIM paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
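# A minimal usage sketch (assumes network access and a trained unconditional
# checkpoint; "google/ddpm-cifar10-32" is one such Hub model):
#
#     from diffusers import DDIMPipeline
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(num_inference_steps=50, eta=0.0).images[0]
#     image.save("sample.png")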
311
0
def solution(length: int = 50) -> int:
    """
    Project Euler problem 117: count the ways a row of `length` units can be
    filled with grey unit squares and coloured tiles of length two, three or
    four. ways_number[n] starts at 1 (the all-grey row); placing the first
    coloured tile at `tile_start` leaves a suffix problem of size
    n - tile_start - tile_length.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
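# The Project Euler statement gives a hand-checkable case: a row of five units
# can be tiled in exactly fifteen different ways, which the recurrence reproduces.
assert solution(5) == 15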
369
from __future__ import annotations

from random import random


class Node:
    """
    Treap's node: holds a value and a random heap priority.
    The treap is a binary search tree by value and a heap by priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap in two: the left part holds values not greater than
    `value`, the right part holds the rest."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps, assuming every value in `left` is smaller than every
    value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` by splitting around it and merging the new node back in."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes equal to `value` by splitting them out and re-merging."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated command string: "+x" inserts x and "-x"
    erases all nodes with value x."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """After each command, the program prints the treap."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
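# A short usage sketch of the API above:
#
#     root = None
#     for value in [5, 3, 8, 1]:
#         root = insert(root, value)
#     inorder(root)   # prints the values in sorted order: 1,3,5,8,
#     root = erase(root, 3)
#     inorder(root)   # 1,5,8,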
311
0
"""
Project Euler Problem 8: find the thirteen adjacent digits in the 1000-digit
number below that have the greatest product.
https://projecteuler.net/problem=8
"""
import sys

N = (
    '73167176531330624919225119674426574742355349194934'
    '96983520312774506326239578318016984801869478851843'
    '85861560789112949495459501737958331952853208805511'
    '12540698747158523863050715693290963295227443043557'
    '66896648950445244523161731856403098711121722383113'
    '62229893423380308135336276614282806444486645238749'
    '30358907296290491560440772390713810515859307960866'
    '70172427121883998797908792274921901699720888093776'
    '65727333001053367881220235421809751254540594752243'
    '52584907711670556013604839586446706324415722155397'
    '53697817977846174064955149290862569321978468622482'
    '83972241375657056057490261407972968652414535100474'
    '82166370484403199890008895243450658541227588666881'
    '16427171479924442928230863465674813919123162824586'
    '17866458359124566529476545682848912883142607690042'
    '24219022671055626321111109370544217506941658960408'
    '07198403850962455444362981230987879927244284909188'
    '84580156166097919133875499200524063689912560717606'
    '05886116467109405077541002256983155200055935729725'
    '71636269561882670428252483600823257530420752963450'
)


def str_eval(s: str) -> int:
    """Return the product of the digits in the given string."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n, sliding a
    window forward while the incoming digit is at least as large as the one
    leaving, and jumping ahead otherwise."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
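# The published answer to Problem 8 is 23_514_624_000; the scan above reproduces it.
assert solution() == 23514624000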
370
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A, A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A, A : Tuple = size, scale * w else: A, A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest 
edge A : Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A, A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
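# A quick illustration of the in-place box clamping done by the final helper
# above (it appears as `_clip_box` in the original lxmert/visualbert image
# utilities; the name is an assumption here since it is mangled above):
#
#     import torch
#     boxes = torch.tensor([[-5.0, 10.0, 120.0, 50.0]])
#     # clamped to an image of size (h=100, w=100), boxes becomes
#     # tensor([[0., 10., 100., 50.]])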
311
0
import argparse

import torch

from transformers import BertForMaskedLM


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError('args.model_type should be "bert".')

    state_dict = model.state_dict()
    compressed_sd = {}

    # map the selected teacher (BERT) layers onto DistilBERT's parameter names
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]

            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
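# Example invocation using the argparse flags declared above (the script
# filename is an assumption; in transformers the script lives under
# examples/research_projects/distillation/scripts/):
#
#     python extract_distilbert.py \
#         --model_type bert \
#         --model_name bert-base-uncased \
#         --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth \
#         --vocab_transform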
371
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
        required=False,
    )
    parser.add_argument(
        '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
    )
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--vae_path',
        type=str,
        default=None,
        required=False,
        help='Set to a path, hub id to an already converted vae to not convert it again.',
    )
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )

    if args.half:
        pipe.to(torch_dtype=torch.float16)

    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
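# Example invocation (the script name matches the diffusers scripts/ folder;
# checkpoint and output paths are placeholders):
#
#     python convert_original_stable_diffusion_to_diffusers.py \
#         --checkpoint_path ./v1-5.ckpt \
#         --original_config_file ./v1-inference.yaml \
#         --scheduler_type pndm \
#         --dump_path ./stable-diffusion-v1-5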
311
0
'''simple docstring''' import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class A ( a_ ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="None" , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> List[Any]: """simple docstring""" A : Any = parent A : Any = batch_size A : List[str] = seq_length A : List[str] = is_training A : Union[str, Any] = use_input_mask A : Any = use_token_type_ids A : Tuple = use_labels A : List[str] = vocab_size A : Tuple = hidden_size A : str = num_hidden_layers A : str = num_attention_heads A : Union[str, Any] = intermediate_size A : Union[str, Any] = hidden_act A : str = hidden_dropout_prob A : str = attention_probs_dropout_prob A : str = max_position_embeddings A : List[Any] = type_vocab_size A : int = type_sequence_label_size A : str = initializer_range A : str = num_labels A : Optional[Any] = num_choices A : List[str] = relative_attention A : List[Any] = position_biased_input A : int = pos_att_type A : Dict = scope def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Any = None if self.use_input_mask: A : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) A : List[str] = None if self.use_token_type_ids: A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Dict = None A : List[Any] = None A : str = None if self.use_labels: A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) A : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , 
relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[str] = self.get_config() A : str = 300 return config def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : List[str] = DebertaModel(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : int = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE )[0] A : List[Any] = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE )[0] A : str = model(SCREAMING_SNAKE_CASE )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : str = DebertaForMaskedLM(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[str] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : List[str] = self.num_labels A : List[str] = DebertaForSequenceClassification(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[str] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Dict = self.num_labels A : str = DebertaForTokenClassification(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = DebertaForQuestionAnswering(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[str] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , ) 
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = self.prepare_config_and_inputs() ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Any = config_and_inputs A : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( a_ , a_ , unittest.TestCase ): __magic_name__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ = ( { "feature-extraction": DebertaModel, "fill-mask": DebertaForMaskedLM, "question-answering": DebertaForQuestionAnswering, "text-classification": DebertaForSequenceClassification, "token-classification": DebertaForTokenClassification, "zero-shot": DebertaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = DebertaModelTester(self ) A : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[Any] = DebertaModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_torch @require_sentencepiece @require_tokenizers class A ( unittest.TestCase ): @unittest.skip(reason='''Model not available yet''' ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" pass @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Union[str, Any] = DebertaModel.from_pretrained('''microsoft/deberta-base''' ) A : int = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] ) A : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A : Dict = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )[0] # compare the actual values for a slice. 
A : Any = torch.tensor( [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE , atol=1e-4 ) , F'{output[:, 1:4, 1:4]}' )
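# The integration test above mirrors plain usage of the checkpoint. A minimal
# sketch, reusing the token ids from the test (assumes network access to the
# Hugging Face Hub):
#
#     import torch
#     from transformers import DebertaModel
#     model = DebertaModel.from_pretrained('microsoft/deberta-base')
#     input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
#     hidden_states = model(input_ids)[0]  # shape (1, 11, hidden_size)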
350
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase : str = datasets.utils.logging.get_logger(__name__) lowercase : Union[str, Any] = ['names', 'prefix'] lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowercase : List[Any] = ['encoding_errors', 'on_bad_lines'] lowercase : Any = ['date_format'] @dataclass class A ( datasets.BuilderConfig ): __magic_name__ = "," __magic_name__ = None __magic_name__ = "infer" __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = "." __magic_name__ = None __magic_name__ = '"' __magic_name__ = 0 __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = 0 __magic_name__ = True __magic_name__ = False __magic_name__ = None __magic_name__ = 10000 __magic_name__ = None __magic_name__ = "strict" __magic_name__ = "error" __magic_name__ = None def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if self.delimiter is not None: A : Optional[Any] = self.delimiter if self.column_names is not None: A : Optional[Any] = self.column_names @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == 
getattr(CsvConfig() , SCREAMING_SNAKE_CASE ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A ( datasets.ArrowBasedBuilder ): __magic_name__ = CsvConfig def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ): A : str = data_files if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = [files] A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A : Tuple = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[str] = [files] A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) ) return splits def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table: """simple docstring""" if self.config.features is not None: A : Optional[int] = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ): # cheaper cast A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return pa_table def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A : int = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ): A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ): A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE )}: {e}' ) 
raise
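# This builder backs `load_dataset('csv', ...)`; the kwargs filtered above are
# forwarded to `pandas.read_csv`. A minimal usage sketch ('train.csv' is a
# placeholder path):
#
#     from datasets import load_dataset
#     ds = load_dataset('csv', data_files={'train': 'train.csv'}, delimiter=',')['train']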
311
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionSAGPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): __magic_name__ = StableDiffusionSAGPipeline __magic_name__ = TEXT_TO_IMAGE_PARAMS __magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS __magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS __magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS __magic_name__ = False def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" torch.manual_seed(0 ) A : Union[str, Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) A : Tuple = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_snake_case , set_alpha_to_one=_snake_case , ) torch.manual_seed(0 ) A : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) A : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) A : Union[str, Any] = CLIPTextModel(_snake_case ) A : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) A : Optional[int] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Dict: """simple docstring""" if str(_snake_case ).startswith('''mps''' ): A : Dict = torch.manual_seed(_snake_case ) else: A : List[Any] = torch.Generator(device=_snake_case ).manual_seed(_snake_case ) A : Dict = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "numpy", } return inputs def __lowerCAmelCase ( self ) -> int: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' ) A : Tuple = sag_pipe.to(_snake_case ) sag_pipe.set_progress_bar_config(disable=_snake_case ) A : Optional[int] = "." 
A : Optional[int] = torch.manual_seed(0 ) A : List[str] = sag_pipe( [prompt] , generator=_snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) A : Tuple = output.images A : int = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A : List[Any] = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[int] = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) A : Optional[Any] = sag_pipe.to(_snake_case ) sag_pipe.set_progress_bar_config(disable=_snake_case ) A : Union[str, Any] = "." A : Dict = torch.manual_seed(0 ) A : Union[str, Any] = sag_pipe( [prompt] , generator=_snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' ) A : Optional[Any] = output.images A : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) A : Union[str, Any] = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2 def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Dict = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' ) A : str = sag_pipe.to(_snake_case ) sag_pipe.set_progress_bar_config(disable=_snake_case ) A : str = "." A : Union[str, Any] = torch.manual_seed(0 ) A : int = sag_pipe( [prompt] , width=768 , height=512 , generator=_snake_case , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , ) A : Union[str, Any] = output.images assert image.shape == (1, 512, 768, 3)
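# A minimal inference sketch matching the slow tests above (assumes a CUDA
# device and network access; `sag_scale` controls self-attention guidance):
#
#     from diffusers import StableDiffusionSAGPipeline
#     pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
#     pipe = pipe.to('cuda')
#     image = pipe('a photo of an astronaut', sag_scale=0.75).images[0]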
351
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : int = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class A ( __snake_case ): __magic_name__ = '''sew''' def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE ) A : Optional[Any] = hidden_size A : Any = feat_extract_norm A : Optional[int] = feat_extract_activation A : Tuple = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : int = conv_bias A : List[Any] = num_conv_pos_embeddings A : Tuple = num_conv_pos_embedding_groups A : int = len(self.conv_dim ) A : Dict = num_hidden_layers A : Optional[int] = intermediate_size A : Any = squeeze_factor A : int = hidden_act A : str = num_attention_heads A : Dict = hidden_dropout A : Optional[Any] = attention_dropout A : List[str] = activation_dropout A : Union[str, Any] = feat_proj_dropout A : Union[str, Any] = final_dropout A : int = layerdrop A : Optional[Any] = layer_norm_eps A : Any = initializer_range A : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : Optional[Any] = apply_spec_augment A : Optional[Any] = mask_time_prob A : Union[str, Any] = mask_time_length A : Optional[Any] = mask_time_min_masks A : str = mask_feature_prob A : Tuple = mask_feature_length A : Any = mask_feature_min_masks # ctc loss A : List[Any] = ctc_loss_reduction A : Dict = ctc_zero_infinity # sequence classification A : int = use_weighted_layer_sum A : Optional[int] = classifier_proj_size @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
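# Sanity check for the stride product computed by the final property above:
# with the default conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
# each encoder frame covers 5 * 2**6 = 320 raw audio samples.
#
#     import functools, operator
#     functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1)  # -> 320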
311
0
'''simple docstring''' import json import os import shutil import tempfile from unittest import TestCase from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast from transformers.models.bart.configuration_bart import BartConfig from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.dpr.configuration_dpr import DPRConfig from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available if is_torch_available() and is_datasets_available() and is_faiss_available(): from transformers.models.rag.configuration_rag import RagConfig from transformers.models.rag.tokenization_rag import RagTokenizer @require_faiss @require_torch class A ( __snake_case ): def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : str = tempfile.mkdtemp() A : Dict = 8 # DPR tok A : Optional[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] A : List[str] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) A : Tuple = os.path.join(SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) # BART tok A : List[str] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] A : Any = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) A : str = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] A : Any = {'''unk_token''': '''<unk>'''} A : List[Any] = os.path.join(self.tmpdirname , '''bart_tokenizer''' ) os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) A : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''vocab_file'''] ) A : Any = os.path.join(SCREAMING_SNAKE_CASE , BART_VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self ) -> DPRQuestionEncoderTokenizer: """simple docstring""" return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) ) def __lowerCAmelCase ( self ) -> BartTokenizer: """simple docstring""" return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) @require_tokenizers def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Dict = os.path.join(self.tmpdirname , '''rag_tokenizer''' ) A : Union[str, Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() ) A : Tuple = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , 
generator=self.get_bart_tokenizer() ) rag_config.save_pretrained(SCREAMING_SNAKE_CASE ) rag_tokenizer.save_pretrained(SCREAMING_SNAKE_CASE ) A : List[Any] = RagTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , config=SCREAMING_SNAKE_CASE ) self.assertIsInstance(new_rag_tokenizer.question_encoder , SCREAMING_SNAKE_CASE ) self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() ) self.assertIsInstance(new_rag_tokenizer.generator , SCREAMING_SNAKE_CASE ) self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() ) @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[str] = RagTokenizer.from_pretrained('''facebook/rag-token-nq''' ) A : Tuple = [ '''who got the first nobel prize in physics''', '''when is the next deadpool movie being released''', '''which mode is used for short wave broadcast service''', '''who is the owner of reading football club''', '''when is the next scandal episode coming out''', '''when is the last time the philadelphia won the superbowl''', '''what is the most current adobe flash player version''', '''how many episodes are there in dragon ball z''', '''what is the first step in the evolution of the eye''', '''where is gall bladder situated in human body''', '''what is the main mineral in lithium batteries''', '''who is the president of usa right now''', '''where do the greasers live in the outsiders''', '''panda is a national animal of which country''', '''what is the name of manchester united stadium''', ] A : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[Any] = RagTokenizer.from_pretrained('''facebook/rag-sequence-nq''' ) A : Tuple = [ '''who got the first nobel prize in physics''', '''when is the next deadpool movie being released''', '''which mode is used for short wave broadcast service''', '''who is the owner of reading football club''', '''when is the next scandal episode coming out''', '''when is the last time the philadelphia won the superbowl''', '''what is the most current adobe flash player version''', '''how many episodes are there in dragon ball z''', '''what is the first step in the evolution of the eye''', '''where is gall bladder situated in human body''', '''what is the main mineral in lithium batteries''', '''who is the president of usa right now''', '''where do the greasers live in the outsiders''', '''panda is a national animal of which country''', '''what is the name of manchester united stadium''', ] A : Dict = tokenizer(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE )
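# A minimal usage sketch of the composite tokenizer exercised above (assumes
# network access to the Hub):
#
#     from transformers import RagTokenizer
#     tokenizer = RagTokenizer.from_pretrained('facebook/rag-token-nq')
#     batch = tokenizer(['who got the first nobel prize in physics'], return_tensors='pt')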
352
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig() A : List[Any] = swin_name.split('''_''' ) A : Tuple = name_split[1] A : Union[str, Any] = int(name_split[4] ) A : str = int(name_split[3][-1] ) if model_size == "tiny": A : Optional[int] = 96 A : Optional[Any] = (2, 2, 6, 2) A : Any = (3, 6, 12, 24) elif model_size == "small": A : Optional[int] = 96 A : str = (2, 2, 18, 2) A : Tuple = (3, 6, 12, 24) elif model_size == "base": A : int = 128 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (4, 8, 16, 32) else: A : Dict = 192 A : Optional[Any] = (2, 2, 18, 2) A : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: A : Dict = 2_1841 else: A : str = 1000 A : List[str] = '''huggingface/label-files''' A : Any = '''imagenet-1k-id2label.json''' A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : str = {int(snake_case__ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Tuple = {v: k for k, v in idalabel.items()} A : Tuple = img_size A : Dict = num_classes A : Optional[Any] = embed_dim A : str = depths A : str = num_heads A : Optional[int] = window_size return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "patch_embed.proj" in name: A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: A : Optional[int] = '''encoder.''' + name if "attn.proj" in name: A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "norm.weight": A : Tuple = '''layernorm.weight''' if name == "norm.bias": A : Tuple = '''layernorm.bias''' if "head" in name: A : Any = name.replace('''head''' , '''classifier''' ) else: A : List[Any] = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A : Dict = key.split('''.''' ) A : Optional[int] = int(key_split[1] ) A : List[str] = int(key_split[3] ) A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : Any = val[:dim, :] A : Dict = val[ dim : dim * 2, : ] A : List[str] = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Any = val[ -dim: ] else: A : str = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A : Optional[Any] = get_swin_config(snake_case__ ) A : Optional[int] = SwinForImageClassification(snake_case__ ) model.eval() A : List[str] = 
convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) ) A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) A : Any = timm_model(inputs['''pixel_values'''] ) A : Optional[Any] = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
311
0
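# Editor's note: the Swin checkpoint converter above slices each fused ``qkv``
# weight into separate query/key/value projections. A minimal, self-contained
# sketch of that slicing; ``dim`` and the tensor values are hypothetical
# illustration, not taken from any real checkpoint.
import torch

dim = 4  # hypothetical per-projection size (the converter reads it from all_head_size)
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

query = qkv_weight[:dim, :]           # first `dim` rows
key = qkv_weight[dim : dim * 2, :]    # middle `dim` rows
value = qkv_weight[-dim:, :]          # last `dim` rows

assert query.shape == key.shape == value.shape == (dim, dim)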
'''simple docstring''' from __future__ import annotations class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A, A : List[Any] = text, pattern A, A : Optional[Any] = len(SCREAMING_SNAKE_CASE ), len(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Tuple = [] for i in range(self.textLen - self.patLen + 1 ): A : Optional[int] = self.mismatch_in_text(SCREAMING_SNAKE_CASE ) if mismatch_index == -1: positions.append(SCREAMING_SNAKE_CASE ) else: A : List[str] = self.match_in_pattern(self.text[mismatch_index] ) A : Optional[int] = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions lowercase : Optional[int] = 'ABAABA' lowercase : Tuple = 'AB' lowercase : Tuple = BoyerMooreSearch(text, pattern) lowercase : Tuple = bms.bad_character_heuristic() if len(positions) == 0: print('No match found') else: print('Pattern found in following positions: ') print(positions)
353
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Optional[int] = logging.get_logger(__name__) lowercase : Tuple = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class A ( __snake_case ): __magic_name__ = '''pix2struct_text_model''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : str = vocab_size A : List[str] = hidden_size A : List[Any] = d_kv A : Optional[Any] = d_ff A : Dict = num_layers A : Dict = num_heads A : Optional[int] = relative_attention_num_buckets A : Optional[Any] = relative_attention_max_distance A : Dict = dropout_rate A : Dict = layer_norm_epsilon A : Tuple = initializer_factor A : Union[str, Any] = use_cache A : int = eos_token_id A : List[str] = decoder_start_token_id # for backwards compatibility A : int = dense_act_fn super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Union[str, Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct_vision_model''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : List[str] = hidden_size A : Optional[Any] = patch_embed_hidden_size A : Union[str, Any] = d_ff A : Dict = dropout_rate A : str = num_hidden_layers A : Dict = num_attention_heads A : Tuple = initializer_range A : List[str] = initializer_factor A : Union[str, Any] = attention_dropout A : Tuple = layer_norm_eps A : int = dense_act_fn A : Optional[int] = seq_len A : Tuple = relative_attention_num_buckets A : str = relative_attention_max_distance A : Optional[Any] = d_kv @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Optional[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct''' __magic_name__ = True def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text_config is None: A : Dict = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: A : str = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE ) A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE ) A : Any = self.text_config.decoder_start_token_id A : Any = self.text_config.pad_token_id A : Dict = self.text_config.eos_token_id A : Union[str, Any] = initializer_factor A : Tuple = initializer_range A : Optional[Any] = self.initializer_range A : int = self.initializer_range A : Tuple = is_vqa @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = copy.deepcopy(self.__dict__ ) A : Dict = self.text_config.to_dict() A : int = self.vision_config.to_dict() A : Any = self.__class__.model_type return output
311
0
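# Editor's note: a minimal illustration of the bad-character rule used by the
# Boyer-Moore searcher above. On a mismatch, the pattern shifts so that its
# rightmost occurrence of the offending text character lines up with the
# mismatch position. Inputs here are arbitrary examples.
def rightmost_occurrence(pattern: str, char: str) -> int:
    # scan the pattern right-to-left; -1 means the character is absent
    for i in range(len(pattern) - 1, -1, -1):
        if pattern[i] == char:
            return i
    return -1

# pattern "AB" mismatching against "A" at text index 2 shifts by 2 - 0 = 2
shift = 2 - rightmost_occurrence("AB", "A")
assert shift == 2
assert rightmost_occurrence("AB", "C") == -1  # absent character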
'''simple docstring''' import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class A ( lowerCamelCase__ , unittest.TestCase ): __magic_name__ = FlaxAutoencoderKL @property def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Tuple = 4 A : List[Any] = 3 A : Dict = (32, 32) A : str = jax.random.PRNGKey(0 ) A : Tuple = jax.random.uniform(SCREAMING_SNAKE_CASE , ((batch_size, num_channels) + sizes) ) return {"sample": image, "prng_key": prng_key} def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Optional[int] = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } A : Any = self.dummy_input return init_dict, inputs_dict
354
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : List[str] = 2 A : Dict = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(snake_case__ ) if n > 1: factors.append(snake_case__ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
311
0
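# Editor's note: a standalone usage sketch of the trial-division factorisation
# above, with readable names; the input 315 is an arbitrary example.
import math

def prime_factors(n: int) -> list[int]:
    i, factors = 2, []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)  # i is prime here: all smaller factors were removed
    if n > 1:
        factors.append(n)  # whatever remains above sqrt is itself prime
    return factors

assert prime_factors(315) == [3, 3, 5, 7]
assert math.prod(prime_factors(315)) == 315  # the factors multiply back to n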
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowercase : Dict = logging.get_logger(__name__) class A ( __snake_case ): __magic_name__ = ['''input_ids''', '''attention_mask'''] def __init__( self , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE=125 , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: A : Optional[Any] = [F'<extra_id_{i}>' for i in range(lowerCamelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens A : Optional[int] = len(set(filter(lambda SCREAMING_SNAKE_CASE : bool('''extra_id''' in str(lowerCamelCase_ ) ) , lowerCamelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' ''' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the''' ''' extra_ids tokens''' ) A : Optional[Any] = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token A : str = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token A : int = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token super().__init__( eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , extra_ids=lowerCamelCase_ , additional_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) A : List[Any] = extra_ids A : Any = 2**8 # utf is 8 bits # define special tokens dict A : Optional[Any] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } A : Dict = len(self.special_tokens_encoder ) A : Optional[int] = len(lowerCamelCase_ ) for i, token in enumerate(lowerCamelCase_ ): A : Union[str, Any] = self.vocab_size + i - n A : Any = {v: k for k, v in self.special_tokens_encoder.items()} @property def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False ) -> Optional[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(lowerCamelCase_ )) + [1] return ([0] * len(lowerCamelCase_ )) + [1] + ([0] * len(lowerCamelCase_ )) + [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if len(lowerCamelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F'This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated' ''' eos tokens being added.''' ) return token_ids else: return token_ids + [self.eos_token_id] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> int: """simple docstring""" A : Any = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple: """simple docstring""" A : List[Any] = self._add_eos_if_not_present(lowerCamelCase_ ) if token_ids_a is None: return token_ids_a else: A : int = self._add_eos_if_not_present(lowerCamelCase_ ) return token_ids_a + token_ids_a def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : int = [chr(lowerCamelCase_ ) for i in text.encode('''utf-8''' )] return tokens def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if token in self.special_tokens_encoder: A : List[str] = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: A : List[Any] = self.added_tokens_encoder[token] elif len(lowerCamelCase_ ) != 1: A : Tuple = self.unk_token_id else: A : Optional[int] = ord(lowerCamelCase_ ) + self._num_special_tokens return token_id def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if index in self.special_tokens_decoder: A : Tuple = self.special_tokens_decoder[index] else: A : Tuple = chr(index - self._num_special_tokens ) return token def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : List[Any] = b'''''' for token in tokens: if token in self.special_tokens_decoder: A : int = self.special_tokens_decoder[token].encode('''utf-8''' ) elif token in self.added_tokens_decoder: A : Optional[Any] = self.special_tokens_decoder[token].encode('''utf-8''' ) elif token in self.special_tokens_encoder: A : List[str] = token.encode('''utf-8''' ) elif token in self.added_tokens_encoder: A : List[str] = token.encode('''utf-8''' ) else: A : List[Any] = bytes([ord(lowerCamelCase_ )] ) bstring += tok_string A : Dict = bstring.decode('''utf-8''' , errors='''ignore''' ) return string def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> int: """simple docstring""" return ()
355
'''simple docstring''' # Function to print upper half of diamond (pyramid) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' for i in range(0 , snake_case__ ): for _ in range(0 , n - i - 1 ): # printing spaces print(''' ''' , end='''''' ) for _ in range(0 , i + 1 ): # printing stars print('''* ''' , end='''''' ) print() def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' for i in range(snake_case__ , 0 , -1 ): for _ in range(snake_case__ , 0 , -1 ): # printing stars print('''* ''' , end='''''' ) print() for _ in range(n - i + 1 , 0 , -1 ): # printing spaces print(''' ''' , end='''''' ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if n <= 0: print(''' ... .... nothing printing :(''' ) return floyd(snake_case__ ) # upper half reverse_floyd(snake_case__ ) # lower half if __name__ == "__main__": print(R'| /\ | |- | |- |--| |\ /| |-') print(R'|/ \| |- |_ |_ |__| | \/ | |_') lowercase : List[str] = 1 while K: lowercase : List[Any] = int(input('enter the number and , and see the magic : ')) print() pretty_print(user_number) lowercase : Any = int(input('press 0 to exit... and 1 to continue...')) print('Good Bye...')
311
0
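# Editor's note: a toy round trip through the byte-level scheme used by the
# ByT5-style tokenizer above -- every UTF-8 byte is one token, with ids offset
# past the special tokens. The offset of 3 mirrors the pad/eos/unk entries;
# the sample string is arbitrary.
NUM_SPECIAL_TOKENS = 3  # pad=0, eos=1, unk=2

def encode(text: str) -> list[int]:
    return [byte + NUM_SPECIAL_TOKENS for byte in text.encode("utf-8")]

def decode(token_ids: list[int]) -> str:
    return bytes(i - NUM_SPECIAL_TOKENS for i in token_ids).decode("utf-8", errors="ignore")

token_ids = encode("hé")
assert token_ids == [107, 198, 172]  # 'h' is byte 104, 'é' is the two bytes 195, 169
assert decode(token_ids) == "hé"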
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Any: """simple docstring""" A : str = parent A : Dict = batch_size A : List[str] = seq_length A : str = is_training A : Any = use_token_type_ids A : Any = use_labels A : List[Any] = vocab_size A : Any = hidden_size A : List[str] = num_hidden_layers A : List[str] = num_attention_heads A : Optional[Any] = intermediate_size A : int = hidden_act A : Any = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : Dict = max_position_embeddings A : Dict = type_vocab_size A : str = type_sequence_label_size A : Union[str, Any] = initializer_range A : Union[str, Any] = num_labels A : int = num_choices A : int = scope A : Union[str, Any] = self.vocab_size - 1 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Union[str, Any] = None if self.use_token_type_ids: A : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Optional[Any] = None A : Optional[int] = None A : List[str] = None if self.use_labels: A : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Any = ids_tensor([self.batch_size] , self.num_choices ) A : List[str] = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) A : Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[Any] = OpenAIGPTModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() A : List[Any] = model(lowercase_ , token_type_ids=lowercase_ , head_mask=lowercase_ ) A : Optional[Any] = model(lowercase_ , token_type_ids=lowercase_ ) A : Optional[int] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : Optional[Any] = OpenAIGPTLMHeadModel(lowercase_ ) model.to(lowercase_ ) model.eval() A : List[str] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[int] = OpenAIGPTDoubleHeadsModel(lowercase_ ) model.to(lowercase_ ) model.eval() A : str = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.num_labels A : Dict = OpenAIGPTForSequenceClassification(lowercase_ ) model.to(lowercase_ ) model.eval() A : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Union[str, Any] = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[str] = self.prepare_config_and_inputs() ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Optional[Any] = config_and_inputs A : Any = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): __magic_name__ = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) __magic_name__ = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly __magic_name__ = ( { '''feature-extraction''': OpenAIGPTModel, '''text-classification''': OpenAIGPTForSequenceClassification, '''text-generation''': OpenAIGPTLMHeadModel, '''zero-shot''': OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" A : Dict = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": A : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowercase_ , ) A : Dict = inputs_dict['''labels'''] A : Any = inputs_dict['''labels'''] A : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowercase_ , ) A : str = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[Any] = OpenAIGPTModelTester(self ) A : List[str] = ConfigTester(self , config_class=lowercase_ , n_embd=37 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*lowercase_ ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*lowercase_ ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*lowercase_ ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowercase_ ) @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[str] = OpenAIGPTModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[Any] = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(lowercase_ ) A : str = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=lowercase_ ) # the president is A : Optional[Any] = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the A : int = model.generate(lowercase_ , do_sample=lowercase_ ) self.assertListEqual(output_ids[0].tolist() , lowercase_ )
356
'''simple docstring''' # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" A : List[Any] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE , ) A : Optional[Any] = image.to(self.device ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
311
0
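# Editor's note: the pipeline above is the standard denoising loop -- predict
# noise at each timestep, then let the scheduler step the sample backwards in
# time. Below is a toy with a stand-in "model" and a made-up update rule,
# purely to show the control flow; it is not real diffusion math.
import torch

sample = torch.randn(1, 3, 8, 8)          # (batch, channels, height, width)
for t in range(50, 0, -1):                # timesteps, high to low
    noise_pred = 0.1 * sample             # stand-in for unet(sample, t).sample
    sample = sample - noise_pred / t      # stand-in for scheduler.step(...).prev_sample
sample = (sample / 2 + 0.5).clamp(0, 1)   # same [-1, 1] -> [0, 1] rescale as the pipeline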
'''simple docstring''' import argparse import os # New Code # import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils import find_executable_batch_size ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to ensure out-of-memory errors never # interrupt training, and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowercase : Union[str, Any] = 16 lowercase : str = 32 def lowerCAmelCase_ ( snake_case__ , snake_case__ = 16 ): '''simple docstring''' A : Dict = AutoTokenizer.from_pretrained('''bert-base-cased''' ) A : int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(snake_case__ ): # max_length=None => use the model max length (it's actually the default) A : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowercase_ , max_length=lowercase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A : Any = datasets.map( lowercase_ , batched=lowercase_ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A : str = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(snake_case__ ): # On TPU it's best to pad everything to the same length or training will be very slow. A : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A : Optional[int] = 16 elif accelerator.mixed_precision != "no": A : Optional[int] = 8 else: A : Optional[Any] = None return tokenizer.pad( lowercase_ , padding='''longest''' , max_length=lowercase_ , pad_to_multiple_of=lowercase_ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
A : Optional[int] = DataLoader( tokenized_datasets['''train'''] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) A : int = DataLoader( tokenized_datasets['''validation'''] , shuffle=lowercase_ , collate_fn=lowercase_ , batch_size=lowercase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowercase : Tuple = mocked_dataloaders # noqa: F811 def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , lowercase_ ) == "1": A : int = 2 # Initialize accelerator A : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A : str = config['''lr'''] A : List[str] = int(config['''num_epochs'''] ) A : Optional[int] = int(config['''seed'''] ) A : int = int(config['''batch_size'''] ) A : Union[str, Any] = evaluate.load('''glue''' , '''mrpc''' ) # New Code # # We now can define an inner training loop function. It should take a batch size as the only parameter, # and build the dataloaders in there. # It also gets our decorator @find_executable_batch_size(starting_batch_size=lowercase_ ) def inner_training_loop(snake_case__ ): # And now just move everything below under this function # We need to bring in the Accelerator object from earlier nonlocal accelerator # And reset all of its attributes that could hold onto any memory: accelerator.free_memory() # Then we can declare the model, optimizer, and everything else: set_seed(lowercase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A : str = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=lowercase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A : int = model.to(accelerator.device ) # Instantiate optimizer A : str = AdamW(params=model.parameters() , lr=lowercase_ ) A, A : List[str] = get_dataloaders(lowercase_ , lowercase_ ) # Instantiate scheduler A : Optional[int] = get_linear_schedule_with_warmup( optimizer=lowercase_ , num_warmup_steps=100 , num_training_steps=(len(lowercase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A, A, A, A, A : int = accelerator.prepare( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) # Now we train the model for epoch in range(lowercase_ ): model.train() for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) A : Optional[int] = model(**lowercase_ ) A : Union[str, Any] = outputs.loss accelerator.backward(lowercase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): A : Any = model(**lowercase_ ) A : int = outputs.logits.argmax(dim=-1 ) A, A : List[str] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=lowercase_ , references=lowercase_ , ) A : List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , lowercase_ ) # New Code # # And call it at the end with no arguments # Note: You could also refactor this outside of your training loop function inner_training_loop() def lowerCAmelCase_ ( ): '''simple docstring''' A : str = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=lowercase_ , default=lowercase_ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) A : Any = parser.parse_args() A : List[Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(lowercase_ , lowercase_ ) if __name__ == "__main__": main()
357
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : Any = parent A : List[Any] = batch_size A : Union[str, Any] = seq_length A : Any = is_training A : int = use_input_mask A : Union[str, Any] = vocab_size A : List[Any] = hidden_size A : List[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : str = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : int = max_position_embeddings A : Optional[int] = initializer_range A : Any = use_labels A : Optional[int] = scope def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ) : Any = self.prepare_config_and_inputs() A : Tuple = True A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : int = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) 
) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , ) A : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = True A : Tuple = True A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval() # first forward pass A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , ) A : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] A : Any = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] # select random slice A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() A : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A, A, A : Optional[int] = self.prepare_config_and_inputs() A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , __snake_case , 
unittest.TestCase ): __magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __magic_name__ = (BertGenerationDecoder,) if is_torch_available() else () __magic_name__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[str] = BertGenerationEncoderTester(self ) A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs() A : str = '''bert''' self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() A : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Dict = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Dict = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , 
SCREAMING_SNAKE_CASE ) A : Any = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
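# Editor's note: `find_executable_batch_size` in the training script above
# retries the wrapped loop with a halved batch size whenever it hits an
# out-of-memory error. A dependency-free sketch of that pattern; the
# MemoryError threshold below is a made-up stand-in for a CUDA OOM.
def run_with_fallback(train_fn, starting_batch_size: int = 128):
    batch_size = starting_batch_size
    while batch_size > 0:
        try:
            return train_fn(batch_size)
        except MemoryError:
            batch_size //= 2  # halve and retry, as the decorator does
    raise RuntimeError("no executable batch size found")

def fake_train(batch_size: int) -> int:
    if batch_size > 32:  # pretend anything above 32 exhausts memory
        raise MemoryError
    return batch_size

assert run_with_fallback(fake_train) == 32  # 128 -> 64 -> 32 succeeds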
'''simple docstring''' import string import numpy def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' return b if a == 0 else greatest_common_divisor(b % a , A_ ) class A : __magic_name__ = string.ascii_uppercase + string.digits # This cipher takes alphanumerics into account # i.e. a total of 36 characters # take x and return x % len(key_string) __magic_name__ = numpy.vectorize(lambda __snake_case : x % 36 ) __magic_name__ = numpy.vectorize(a_ ) def __init__( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : Any = self.modulus(lowercase_ ) # mod36 calc's on the encrypt key self.check_determinant() # validate the determinant of the encryption key A : int = encrypt_key.shape[0] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return self.key_string.index(lowercase_ ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" return self.key_string[round(lowercase_ )] def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[str] = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: A : str = det % len(self.key_string ) A : Union[str, Any] = len(self.key_string ) if greatest_common_divisor(lowercase_ , len(self.key_string ) ) != 1: A : str = ( F'determinant modular {req_l} of encryption key({det}) ' F'is not co prime w.r.t {req_l}.\nTry another key.' ) raise ValueError(lowercase_ ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : Optional[Any] = [char for char in text.upper() if char in self.key_string] A : Optional[Any] = chars[-1] while len(lowercase_ ) % self.break_key != 0: chars.append(lowercase_ ) return "".join(lowercase_ ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[Any] = self.process_text(text.upper() ) A : List[Any] = '''''' for i in range(0 , len(lowercase_ ) - self.break_key + 1 , self.break_key ): A : int = text[i : i + self.break_key] A : List[str] = [self.replace_letters(lowercase_ ) for char in batch] A : Dict = numpy.array([vec] ).T A : List[str] = self.modulus(self.encrypt_key.dot(lowercase_ ) ).T.tolist()[ 0 ] A : int = ''''''.join( self.replace_digits(lowercase_ ) for num in batch_encrypted ) encrypted += encrypted_batch return encrypted def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Optional[Any] = round(numpy.linalg.det(self.encrypt_key ) ) if det < 0: A : Any = det % len(self.key_string ) A : Dict = None for i in range(len(self.key_string ) ): if (det * i) % len(self.key_string ) == 1: A : Union[str, Any] = i break A : List[Any] = ( det_inv * numpy.linalg.det(self.encrypt_key ) * numpy.linalg.inv(self.encrypt_key ) ) return self.to_int(self.modulus(lowercase_ ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : int = self.make_decrypt_key() A : Any = self.process_text(text.upper() ) A : str = '''''' for i in range(0 , len(lowercase_ ) - self.break_key + 1 , self.break_key ): A : Optional[int] = text[i : i + self.break_key] A : Union[str, Any] = [self.replace_letters(lowercase_ ) for char in batch] A : Optional[Any] = numpy.array([vec] ).T A : Dict = self.modulus(decrypt_key.dot(lowercase_ ) ).T.tolist()[0] A : Any = ''''''.join( self.replace_digits(lowercase_ ) for num in batch_decrypted ) decrypted += decrypted_batch return decrypted def lowerCAmelCase_ ( ): '''simple docstring''' A : Any = int(input('''Enter the order of the encryption key: ''' ) ) A : Any = [] print('''Enter each 
row of the encryption key with space separated integers''' ) for _ in range(A_ ): A : int = [int(A_ ) for x in input().split()] hill_matrix.append(A_ ) A : List[Any] = HillCipher(numpy.array(A_ ) ) print('''Would you like to encrypt or decrypt some text? (1 or 2)''' ) A : Optional[Any] = input('''\n1. Encrypt\n2. Decrypt\n''' ) if option == "1": A : str = input('''What text would you like to encrypt?: ''' ) print('''Your encrypted text is:''' ) print(hc.encrypt(A_ ) ) elif option == "2": A : Dict = input('''What text would you like to decrypt?: ''' ) print('''Your decrypted text is:''' ) print(hc.decrypt(A_ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
358
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ ) A : Any = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ ) class A ( __snake_case ): __magic_name__ = '''sigmoid''' __magic_name__ = '''softmax''' __magic_name__ = '''none''' @add_end_docstrings( __snake_case , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class A ( __snake_case ): __magic_name__ = False __magic_name__ = ClassificationFunction.NONE def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Optional[Any] = tokenizer_kwargs A : int = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: A : int = self.model.config.return_all_scores if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None: A : Union[str, Any] = top_k A : Dict = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , ) if return_all_scores: A : Optional[int] = None else: A : Dict = 1 if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Dict = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A : int = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
A : Any = '''top_k''' not in kwargs if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]: """simple docstring""" A : List[Any] = self.framework if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return self.model(**SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]: """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A : Optional[int] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A : Any = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: A : Optional[int] = self.model.config.function_to_apply else: A : Optional[int] = ClassificationFunction.NONE A : Any = model_outputs['''logits'''][0] A : List[Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A : int = sigmoid(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.SOFTMAX: A : Any = softmax(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.NONE: A : int = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A : int = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE ) ] if not _legacy: dict_scores.sort(key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE ) if top_k is not None: A : Union[str, Any] = dict_scores[:top_k] return dict_scores
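# A standalone sketch of the postprocess math above: numerically stable
# softmax over raw logits, then label/score dicts sorted best-first. The
# id2label mapping here is hypothetical, purely for illustration.
import numpy as np

example_logits = np.array([1.2, -0.3, 0.5])
maxes = np.max(example_logits, axis=-1, keepdims=True)
shifted_exp = np.exp(example_logits - maxes)
example_scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
id2label = {0: 'positive', 1: 'negative', 2: 'neutral'}  # hypothetical mapping
ranked = sorted(
    ({'label': id2label[i], 'score': float(s)} for i, s in enumerate(example_scores)),
    key=lambda d: d['score'],
    reverse=True,
)
print(ranked[:2])  # what top_k=2 would return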
311
0
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowercase : Union[str, Any] = 2_00 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowercase : Optional[Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowercase : Optional[Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 10_00)) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Union[str, Any] = len([g for position, g in enumerate(_lowercase ) if g == main_target[position]] ) return (item, float(_lowercase )) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = random.randint(0 , len(_lowercase ) - 1 ) A : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:] A : Any = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = list(_lowercase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: A : List[Any] = random.choice(_lowercase ) return "".join(_lowercase ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , ): '''simple docstring''' A : Dict = [] # Generate more children proportionally to the fitness score. A : Any = int(parent_a[1] * 100 ) + 1 A : Dict = 10 if child_n >= 10 else child_n for _ in range(_lowercase ): A : Dict = population_score[random.randint(0 , _lowercase )][0] A, A : Any = crossover(parent_a[0] , _lowercase ) # Append new string to the population list. pop.append(mutate(_lowercase , _lowercase ) ) pop.append(mutate(_lowercase , _lowercase ) ) return pop def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ = True ): '''simple docstring''' if N_POPULATION < N_SELECTED: A : Union[str, Any] = F'{N_POPULATION} must be bigger than {N_SELECTED}' raise ValueError(_lowercase ) # Verify that the target contains no genes besides the ones inside genes variable. A : List[Any] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: A : Optional[int] = F'{not_in_genes_list} is not in genes list, evolution cannot converge' raise ValueError(_lowercase ) # Generate random starting population. A : Optional[Any] = [] for _ in range(_lowercase ): population.append(''''''.join([random.choice(_lowercase ) for i in range(len(_lowercase ) )] ) ) # Just some logs to know what the algorithms is doing. A, A : int = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(_lowercase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. 
A : Optional[int] = [evaluate(_lowercase , _lowercase ) for item in population] # Check if there is a matching evolution. A : Optional[int] = sorted(_lowercase , key=lambda snake_case__ : x[1] , reverse=_lowercase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'\nGeneration: {generation}' F'\nTotal Population:{total_population}' F'\nBest score: {population_score[0][1]}' F'\nBest string: {population_score[0][0]}' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. A : List[Any] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(_lowercase ) # Normalize population score to be between 0 and 1. A : List[str] = [ (item, score / len(_lowercase )) for item, score in population_score ] # This is selection for i in range(_lowercase ): population.extend(select(population_score[int(_lowercase )] , _lowercase , _lowercase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(_lowercase ) > N_POPULATION: break if __name__ == "__main__": lowercase : str = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) lowercase : Any = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) lowercase , lowercase , lowercase : int = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
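# A tiny standalone sketch of the fitness rule evaluate() applies above:
# count position-wise gene matches against the target (strings illustrative).
example_target = 'banana'
example_candidate = 'bandna'
matches = len([g for position, g in enumerate(example_candidate) if g == example_target[position]])
print(matches, matches / len(example_target))  # 5 matching genes -> score 5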
359
'''simple docstring'''

from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    '''simple docstring'''
    url = F'https://www.amazon.in/laptop/s?k={product}'
    header = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''',
            attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''},
        ),
        soup.find_all('''div''', attrs={'''class''': '''a-row a-size-base a-color-base'''}),
    ):
        try:
            product_title = item.h2.text
            product_link = '''https://www.amazon.in/''' + item.h2.a['''href''']
            product_price = item.find('''span''', attrs={'''class''': '''a-offscreen'''}).text
            try:
                product_rating = item.find('''span''', attrs={'''class''': '''a-icon-alt'''}).text
            except AttributeError:
                product_rating = '''Not available'''
            try:
                product_mrp = (
                    '''₹'''
                    + item.find(
                        '''span''', attrs={'''class''': '''a-price a-text-price'''}
                    ).text.split('''₹''')[1]
                )
            except AttributeError:
                product_mrp = ''''''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('''₹''').replace(''',''', ''''''))
                            - float(product_price.strip('''₹''').replace(''',''', ''''''))
                        )
                        / float(product_mrp.strip('''₹''').replace(''',''', ''''''))
                    )
                    * 100
                )
            except ValueError:
                discount = float('''nan''')
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = ''' '''
        product_mrp = ''' '''
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = 'headphones'
    get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
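# A standalone sketch of the discount arithmetic above, fed illustrative
# price strings in the same '₹1,234' format the scraper parses:
example_mrp = '₹1,000'
example_price = '₹750'
mrp_value = float(example_mrp.strip('₹').replace(',', ''))
price_value = float(example_price.strip('₹').replace(',', ''))
print((mrp_value - price_value) / mrp_value * 100)  # 25.0 (% discount)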
311
0
'''simple docstring'''


class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        """simple docstring"""
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """simple docstring"""
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """simple docstring"""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
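# A small usage sketch of the class above:
ps = PrefixSum([1, 2, 3, 4])
print(ps.get_sum(1, 3))      # 2 + 3 + 4 = 9
print(ps.contains_sum(6))    # True: the subarray 1 + 2 + 3 sums to 6
print(ps.contains_sum(100))  # False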
360
'''simple docstring'''

import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    '''simple docstring'''
    img = Image.new('''RGB''', (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
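# A quick check of get_distance on two illustrative points: the origin never
# diverges (distance 1.0), while a point far outside the set escapes on the
# first step (distance 0.0).
print(get_distance(0.0, 0.0, 50))  # 1.0 -> rendered black
print(get_distance(2.0, 2.0, 50))  # 0.0 -> escapes immediately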
311
0
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Optional[Any] = '''ZinengTang/tvlt-base''' A : Tuple = tempfile.mkdtemp() def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase_ ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = self.get_image_processor() A : Dict = self.get_feature_extractor() A : Optional[Any] = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) processor.save_pretrained(self.tmpdirname ) A : Optional[int] = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , lowerCamelCase_ ) self.assertIsInstance(processor.image_processor , lowerCamelCase_ ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[str] = self.get_image_processor() A : Optional[Any] = self.get_feature_extractor() A : Tuple = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) A : str = np.ones([12000] ) A : str = feature_extractor(lowerCamelCase_ , return_tensors='''np''' ) A : Union[str, Any] = processor(audio=lowerCamelCase_ , return_tensors='''np''' ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = self.get_image_processor() A : int = self.get_feature_extractor() A : Optional[Any] = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) A : Dict = np.ones([3, 224, 224] ) A : str = image_processor(lowerCamelCase_ , return_tensors='''np''' ) A : List[str] = processor(images=lowerCamelCase_ , return_tensors='''np''' ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Dict = self.get_image_processor() A : Optional[int] = self.get_feature_extractor() A : int = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) A : int = np.ones([12000] ) A : Tuple = np.ones([3, 224, 224] ) A : Any = processor(audio=lowerCamelCase_ , images=lowerCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] ) # test if it raises when no input is passed with pytest.raises(lowerCamelCase_ ): processor() def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.get_image_processor() A : List[Any] = self.get_feature_extractor() A : str = TvltProcessor(image_processor=lowerCamelCase_ , feature_extractor=lowerCamelCase_ ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + 
feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
361
'''simple docstring'''

import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def test_custom_files_are_present(transformers_path: Path) -> bool:
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'

    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
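# A tiny standalone sketch of the same all-files-present check, with
# illustrative file names instead of the compiled-extension paths above:
from pathlib import Path

required = ['setup.py', 'README.md']
print(all((Path.cwd() / name).exists() for name in required))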
311
0
'''simple docstring'''


def solution(pence: int = 200) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(2_00) == 7_36_82
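# A worked mini example of the same DP with coins {1, 2} and a 4 pence target:
# the combinations are 1+1+1+1, 1+1+2 and 2+2, so the answer is 3.
ways = [0] * 5
ways[0] = 1
for coin in (1, 2):
    for i in range(coin, 5):
        ways[i] += ways[i - coin]
print(ways[4])  # 3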
362
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]: """simple docstring""" A : List[str] = parent A : Optional[Any] = batch_size A : Tuple = image_size A : int = patch_size A : Optional[int] = num_channels A : str = is_training A : List[Any] = use_labels A : Any = hidden_size A : Any = num_hidden_layers A : Optional[int] = num_attention_heads A : Any = intermediate_size A : List[str] = hidden_act A : str = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Optional[int] = initializer_range A : Dict = scope A : Tuple = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[Any] = (image_size // patch_size) ** 2 A : Tuple = num_patches + 2 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Tuple = None if self.use_labels: A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Tuple = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE ) A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Tuple = 
TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE ) A : List[Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : Optional[int] = 1 A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE ) A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : str = self.type_sequence_label_size A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Optional[Any] = 1 A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.prepare_config_and_inputs() A, A, A : Tuple = config_and_inputs A : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = TFDeiTModelTester(self ) A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" pass def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A, A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) A : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple 
docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple: """simple docstring""" A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) A : Dict = self.default_image_processor A : List[str] = prepare_img() A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass A : Optional[int] = model(**SCREAMING_SNAKE_CASE ) # verify the logits A : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
'''simple docstring''' import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class A ( unittest.TestCase ): def __init__( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : str = parent def __lowerCAmelCase ( self ) -> str: """simple docstring""" return {} def lowerCAmelCase_ ( ): '''simple docstring''' A : Tuple = "<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR=\"FFFFFF\">\n <HR>\n <a href=\"http://google.com\">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style=\"color:#0000FF\">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>" A : Union[str, Any] = "\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n " return [html_string_a, html_string_a] @require_bsa class A ( a__ , unittest.TestCase ): __magic_name__ = MarkupLMFeatureExtractor if is_bsa_available() else None def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = MarkupLMFeatureExtractionTester(self ) @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return self.feature_extract_tester.prepare_feat_extract_dict() def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = self.feature_extraction_class() # Test not batched input A : Tuple = get_html_strings()[0] A : Union[str, Any] = feature_extractor(lowerCAmelCase__ ) # fmt: off A : List[Any] = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]] A : Dict = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]] # fmt: on self.assertEqual(encoding.nodes , lowerCAmelCase__ ) self.assertEqual(encoding.xpaths , lowerCAmelCase__ ) # Test batched A : Dict = get_html_strings() A : Optional[int] = feature_extractor(lowerCAmelCase__ ) # fmt: off A : Union[str, Any] = expected_nodes + [["My First Heading", "My first paragraph."]] A : Tuple = expected_xpaths + [["/html/body/h1", "/html/body/p"]] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , lowerCAmelCase__ ) self.assertEqual(encoding.xpaths , lowerCAmelCase__ )
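# A minimal sketch of the HTML-to-text step underlying the feature extractor
# tested above; it assumes bs4 (BeautifulSoup) is installed, which is exactly
# what the availability guards above check for.
from bs4 import BeautifulSoup

html = '<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>'
soup = BeautifulSoup(html, 'html.parser')
print([tag.get_text() for tag in soup.find_all(['h1', 'p'])])
# ['My First Heading', 'My first paragraph.']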
363
'''simple docstring'''

# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
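# A minimal sketch (module names illustrative) of the lazy-import pattern the
# _LazyModule above implements: attribute access triggers the real import only
# on first use.
import importlib


class LazyNamespace:
    def __init__(self, import_structure):
        self._import_structure = import_structure

    def __getattr__(self, name):
        for module_name, names in self._import_structure.items():
            if name in names:
                return getattr(importlib.import_module(module_name), name)
        raise AttributeError(name)


ns = LazyNamespace({'json': ['dumps'], 'math': ['sqrt']})
print(ns.sqrt(9.0))  # 3.0 -- math is imported only when sqrt is first accessed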
311
0
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1000 , ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = parent A : List[str] = batch_size A : Optional[Any] = seq_length A : Any = is_training A : Union[str, Any] = use_input_mask A : str = use_token_type_ids A : Dict = use_labels A : Optional[int] = vocab_size A : int = hidden_size A : Any = num_hidden_layers A : Tuple = num_attention_heads A : Optional[Any] = intermediate_size A : List[str] = hidden_act A : List[Any] = hidden_dropout_prob A : Optional[Any] = attention_probs_dropout_prob A : Union[str, Any] = max_position_embeddings A : Any = type_vocab_size A : str = type_sequence_label_size A : Union[str, Any] = initializer_range A : Any = num_labels A : List[Any] = num_choices A : Tuple = scope A : Dict = range_bbox def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment A : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: A : int = bbox[i, j, 3] A : Tuple = bbox[i, j, 1] A : Optional[int] = t if bbox[i, j, 2] < bbox[i, j, 0]: A : int = bbox[i, j, 2] A : List[Any] = bbox[i, j, 0] A : int = t A : int = tf.convert_to_tensor(a__ ) A : List[str] = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) A : List[Any] = None if self.use_token_type_ids: A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Optional[int] = None A : Optional[Any] = None A : List[str] = None if self.use_labels: A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : int = ids_tensor([self.batch_size] , self.num_choices ) A : int = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , 
hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Dict = TFLayoutLMModel(config=a__ ) A : Tuple = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ ) A : str = model(a__ , a__ , token_type_ids=a__ ) A : List[Any] = model(a__ , a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Optional[int] = TFLayoutLMForMaskedLM(config=a__ ) A : str = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Optional[Any] = self.num_labels A : int = TFLayoutLMForSequenceClassification(config=a__ ) A : Dict = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : Optional[int] = self.num_labels A : Any = TFLayoutLMForTokenClassification(config=a__ ) A : Any = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Optional[Any] = TFLayoutLMForQuestionAnswering(config=a__ ) A : str = model(a__ , a__ , attention_mask=a__ , token_type_ids=a__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Union[str, Any] = self.prepare_config_and_inputs() ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : str = config_and_inputs A : List[str] = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class A ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): __magic_name__ = ( ( TFLayoutLMModel, 
TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) __magic_name__ = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = True __magic_name__ = 10 def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Dict = TFLayoutLMModelTester(self ) A : Optional[int] = ConfigTester(self , config_class=a__ , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a__ ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a__ ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a__ ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a__ ) @slow def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Optional[int] = TFLayoutLMModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @unittest.skip('''Onnx compliancy broke with TF 2.10''' ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" pass def lowerCAmelCase_ ( ): '''simple docstring''' A : Optional[int] = tf.convert_to_tensor([[101,1019,1014,1016,1037,1_2849,4747,1004,1_4246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,1_1300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,1_9274,2772,6205,2_7814,1_6147,1_6147,4343,2047,1_0283,1_0969,1_4389,1012,2338,102]] ) # noqa: E231 A : List[str] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 A : List[Any] = 
tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 A : List[Any] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) A : Optional[Any] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' ) A, A, A, A, A : int = prepare_layoutlm_batch_inputs() # forward pass A : List[str] = model(input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ ) # test the sequence output on [0, :3, :3] A : Optional[int] = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , a__ , atol=1e-3 ) ) # test the pooled output on [1, :3] A : Optional[int] = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , a__ , atol=1e-3 ) ) @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 ) A, A, A, A, A : Tuple = prepare_layoutlm_batch_inputs() # forward pass A : Optional[int] = model( input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar A : List[str] = outputs.loss A : Dict = (2,) self.assertEqual(loss.shape , a__ ) # test the shape of the logits A : List[str] = outputs.logits A : str = (2, 2) self.assertEqual(logits.shape , a__ ) @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Any = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 ) A, A, A, A, A : List[Any] = prepare_layoutlm_batch_inputs() # forward pass A : Any = model( input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) # test the shape of the logits A : List[str] = outputs.logits A : Dict = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , a__ ) @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Union[str, Any] = 
TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' ) A, A, A, A, A : Dict = prepare_layoutlm_batch_inputs() # forward pass A : List[Any] = model(input_ids=a__ , bbox=a__ , attention_mask=a__ , token_type_ids=a__ ) # test the shape of the logits A : str = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , a__ ) self.assertEqual(outputs.end_logits.shape , a__ )
364
'''simple docstring''' from __future__ import annotations lowercase : Union[str, Any] = list[tuple[int, int]] lowercase : Optional[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowercase : Any = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" A : int = pos_x A : Optional[Any] = pos_y A : Optional[Any] = (pos_y, pos_x) A : str = goal_x A : Optional[int] = goal_y A : List[Any] = g_cost A : str = parent A : str = self.calculate_heuristic() def __lowerCAmelCase ( self ) -> float: """simple docstring""" A : Optional[int] = abs(self.pos_x - self.goal_x ) A : Optional[Any] = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE ) A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE ) A : Optional[Any] = [self.start] A : list[Node] = [] A : Tuple = False def __lowerCAmelCase ( self ) -> Path | None: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A : Optional[int] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: A : Optional[int] = True return self.retrace_path(SCREAMING_SNAKE_CASE ) self.closed_nodes.append(SCREAMING_SNAKE_CASE ) A : Any = self.get_successors(SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(SCREAMING_SNAKE_CASE ) else: # retrieve the best current path A : str = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]: """simple docstring""" A : List[Any] = [] for action in delta: A : List[str] = parent.pos_x + action[1] A : Dict = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE , ) ) return successors def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path: """simple docstring""" A : int = node A : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A : int = current_node.parent path.reverse() return path if __name__ == "__main__": lowercase : Tuple = (0, 0) lowercase : List[str] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('------') lowercase : int = GreedyBestFirst(init, goal) lowercase : Union[str, Any] = greedy_bf.search() if path: for pos_x, pos_y in path: lowercase : Dict = 2 for elem in grid: print(elem)
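# A standalone sketch of the Manhattan heuristic that calculate_heuristic
# above ranks nodes by (coordinates illustrative):
def manhattan(pos_x: int, pos_y: int, goal_x: int, goal_y: int) -> int:
    return abs(pos_x - goal_x) + abs(pos_y - goal_y)

print(manhattan(0, 0, 6, 6))  # 12: the start cell's h-cost on the 7x7 grid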
311
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Any = logging.get_logger(__name__) lowercase : str = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class A ( __lowerCAmelCase ): __magic_name__ = '''realm''' def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=8 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=1e-3 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=320 , SCREAMING_SNAKE_CASE=13353718 , SCREAMING_SNAKE_CASE=5000 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) # Common config A : Union[str, Any] = vocab_size A : Dict = max_position_embeddings A : str = hidden_size A : List[str] = retriever_proj_size A : Optional[Any] = num_hidden_layers A : Union[str, Any] = num_attention_heads A : Tuple = num_candidates A : str = intermediate_size A : Optional[int] = hidden_act A : List[Any] = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : Union[str, Any] = initializer_range A : Union[str, Any] = type_vocab_size A : List[Any] = layer_norm_eps # Reader config A : List[str] = span_hidden_size A : str = max_span_width A : Optional[Any] = reader_layer_norm_eps A : Union[str, Any] = reader_beam_size A : Union[str, Any] = reader_seq_len # Retrieval config A : Union[str, Any] = num_block_records A : int = searcher_beam_size
365
'''simple docstring''' import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py lowercase : Any = 'src/transformers' lowercase : str = 'docs/source/en/tasks' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A : Union[str, Any] = f.readlines() # Find the start prompt. A : List[Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 A : List[str] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. lowercase : int = direct_transformers_import(TRANSFORMERS_PATH) lowercase : str = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
lowercase : Optional[int] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : int = TASK_GUIDE_TO_MODELS[task_guide] A : List[str] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() ) A : Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n" def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A, A, A : Optional[int] = _find_text_in_file( filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , ) A : Optional[int] = get_model_list_for_task(snake_case__ ) if current_list != new_list: if overwrite: with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`' ''' to fix this.''' ) if __name__ == "__main__": lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowercase : List[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
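# A standalone sketch of the prompt-delimited extraction _find_text_in_file
# performs above, run on an in-memory list of lines (prompts illustrative):
lines = ['intro\n', '<!--start-->\n', 'model a\n', 'model b\n', '<!--end-->\n']
start_index = next(i for i, line in enumerate(lines) if line.startswith('<!--start-->')) + 1
end_index = next(i for i, line in enumerate(lines) if line.startswith('<!--end-->'))
print(''.join(lines[start_index:end_index]))  # 'model a\nmodel b\n'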
311
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[str] = tempfile.mkdtemp() A : Dict = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """的""", """价""", """格""", """是""", """15""", """便""", """alex""", """##andra""", """,""", """。""", """-""", """t""", """shirt""", ] A : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) A : int = { """do_resize""": True, """size""": {"""height""": 224, """width""": 224}, """do_center_crop""": True, """crop_size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073], """image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711], """do_convert_rgb""": True, } A : List[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] A : List[Any] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : str = self.get_tokenizer() A : List[Any] = self.get_rust_tokenizer() A : Optional[Any] = self.get_image_processor() A : int = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor_slow.save_pretrained(self.tmpdirname ) A : str = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ ) A : Any = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor_fast.save_pretrained(self.tmpdirname ) A : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ ) 
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A : Any = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' ) A : Any = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ ) A : str = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Any = self.get_image_processor() A : str = self.get_tokenizer() A : Tuple = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) A : Union[str, Any] = self.prepare_image_inputs() A : str = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) A : Optional[int] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Dict = self.get_image_processor() A : Union[str, Any] = self.get_tokenizer() A : int = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) A : List[Any] = """Alexandra,T-shirt的价格是15便士。""" A : Tuple = processor(text=SCREAMING_SNAKE_CASE_ ) A : List[str] = tokenizer(SCREAMING_SNAKE_CASE_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Dict = self.get_image_processor() A : Dict = self.get_tokenizer() A : str = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) A : int = """Alexandra,T-shirt的价格是15便士。""" A : Any = self.prepare_image_inputs() A : Dict = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE_ ): processor() def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Optional[Any] = self.get_image_processor() A : Dict = self.get_tokenizer() A : Tuple = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) A : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A : Tuple = processor.batch_decode(SCREAMING_SNAKE_CASE_ ) A : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def __lowerCAmelCase ( self 
) -> Dict: """simple docstring""" A : Optional[Any] = self.get_image_processor() A : Union[str, Any] = self.get_tokenizer() A : List[Any] = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) A : Union[str, Any] = """Alexandra,T-shirt的价格是15便士。""" A : Dict = self.prepare_image_inputs() A : Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
366
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using Heap's algorithm.

    Heap's algorithm produces each of the n! permutations with a single
    swap between consecutive outputs.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list) -> None:
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap the i-th and last elements
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap the first and last elements
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
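As a quick illustrative check of the routine above (the three-element input and expected ordering are our own worked example, not part of the original module), Heap's algorithm emits all 3! = 6 permutations, each one swap away from its predecessor:

# Worked example (assumes `heaps` from the module above is importable).
perms = heaps([1, 2, 3])
assert perms == [
    (1, 2, 3), (2, 1, 3),  # emitted by the inner generate(2, ...) call
    (3, 1, 2), (1, 3, 2),  # after swapping positions 0 and 2 (k is odd)
    (2, 3, 1), (3, 2, 1),  # after the second odd-k swap
]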
311
0
import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_a = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_a)

                tokens_a = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_a), 0)
                text_a = tokenizer.decode(ids)
                self.assertIsInstance(text_a, str)

                self.assertEqual(text_a.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
367
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( __snake_case ): __magic_name__ = (UniPCMultistepScheduler,) __magic_name__ = (('''num_inference_steps''', 25),) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : str = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**SCREAMING_SNAKE_CASE ) return config def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : List[Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : int = 0.1 * sample A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] A, A : Tuple = sample, sample for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = dict(self.forward_default_kwargs ) A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : List[Any] = self.dummy_sample A : int = 0.1 * sample A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[int] = self.get_scheduler_config() A : Any = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) A : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE 
).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if scheduler is None: A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : int = 10 A : Tuple = self.dummy_model() A : Any = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = dict(self.forward_default_kwargs ) A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : Optional[int] = 0.1 * sample if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): A : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10] A : List[str] = dummy_past_residuals[: scheduler.config.solver_order] A : List[Any] = scheduler.timesteps[5] A : Dict = scheduler.timesteps[6] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config ) A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( 
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) A : Dict = self.full_loop( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = self.full_loop() A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[Any] = self.full_loop(prediction_type='''v_prediction''' ) A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.1_014 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = 10 A : Union[str, Any] = self.dummy_model() A : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
311
0
'''simple docstring''' import enum import warnings from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowercase : Optional[Any] = logging.get_logger(__name__) class A ( enum.Enum ): __magic_name__ = 0 __magic_name__ = 1 @add_end_docstrings(_a ) class A ( _a ): __magic_name__ = """generated""" def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" A : Optional[int] = {} if truncation is not None: A : Tuple = truncation A : Tuple = generate_kwargs A : Dict = {} if return_tensors is not None and return_type is None: A : str = ReturnType.TENSORS if return_tensors else ReturnType.TEXT if return_type is not None: A : int = return_type if clean_up_tokenization_spaces is not None: A : List[Any] = clean_up_tokenization_spaces if stop_sequence is not None: A : Dict = self.tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) if len(SCREAMING_SNAKE_CASE ) > 1: warnings.warn( '''Stopping on a multiple token sequence is not yet supported on transformers. The first token of''' ''' the stop sequence will be used as the stop sequence string in the interim.''' ) A : Optional[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return True def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : List[Any] = self.model.config.prefix if self.model.config.prefix is not None else '' if isinstance(args[0] , SCREAMING_SNAKE_CASE ): if self.tokenizer.pad_token_id is None: raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' ) A : int = ([prefix + arg for arg in args[0]],) A : Any = True elif isinstance(args[0] , SCREAMING_SNAKE_CASE ): A : Optional[Any] = (prefix + args[0],) A : int = False else: raise ValueError( F' `args[0]`: {args[0]} have the wrong format. 
The should be either of type `str` or type `list`' ) A : List[str] = self.tokenizer(*SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors=self.framework ) # This is produced by tokenizers but is an invalid generate kwargs if "token_type_ids" in inputs: del inputs["token_type_ids"] return inputs def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" A : Optional[int] = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if ( isinstance(args[0] , SCREAMING_SNAKE_CASE ) and all(isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for el in args[0] ) and all(len(SCREAMING_SNAKE_CASE ) == 1 for res in result ) ): return [res[0] for res in result] return result def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=TruncationStrategy.DO_NOT_TRUNCATE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Dict = self._parse_and_tokenize(SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) return inputs def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if self.framework == "pt": A : int = model_inputs['input_ids'].shape elif self.framework == "tf": A : List[str] = tf.shape(model_inputs['''input_ids'''] ).numpy() A : str = generate_kwargs.get('''min_length''' , self.model.config.min_length ) A : str = generate_kwargs.get('''max_length''' , self.model.config.max_length ) self.check_inputs(SCREAMING_SNAKE_CASE , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] ) A : str = self.model.generate(**SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Union[str, Any] = output_ids.shape[0] if self.framework == "pt": A : Union[str, Any] = output_ids.reshape(SCREAMING_SNAKE_CASE , out_b // in_b , *output_ids.shape[1:] ) elif self.framework == "tf": A : List[Any] = tf.reshape(SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *output_ids.shape[1:]) ) return {"output_ids": output_ids} def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=ReturnType.TEXT , SCREAMING_SNAKE_CASE=False ) -> str: """simple docstring""" A : Optional[int] = [] for output_ids in model_outputs["output_ids"][0]: if return_type == ReturnType.TENSORS: A : Optional[int] = {F'{self.return_name}_token_ids': output_ids} elif return_type == ReturnType.TEXT: A : Optional[int] = { F'{self.return_name}_text': self.tokenizer.decode( SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE , ) } records.append(SCREAMING_SNAKE_CASE ) return records @add_end_docstrings(_a ) class A ( _a ): __magic_name__ = """summary""" def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" return super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" if max_length < min_length: logger.warning(F'Your min_length={min_length} must be inferior than your max_length={max_length}.' ) if input_length < max_length: logger.warning( F'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is ' '''a summarization task, where outputs shorter than the input are typically wanted, you might ''' F'consider decreasing max_length manually, e.g. 
summarizer(\'...\', max_length={input_length//2})' ) @add_end_docstrings(_a ) class A ( _a ): __magic_name__ = """translation""" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if input_length > 0.9 * max_length: logger.warning( F'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider ' '''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' ) return True def __lowerCAmelCase ( self , *SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=TruncationStrategy.DO_NOT_TRUNCATE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ) -> Optional[Any]: """simple docstring""" if getattr(self.tokenizer , '''_build_translation_inputs''' , SCREAMING_SNAKE_CASE ): return self.tokenizer._build_translation_inputs( *SCREAMING_SNAKE_CASE , return_tensors=self.framework , truncation=SCREAMING_SNAKE_CASE , src_lang=SCREAMING_SNAKE_CASE , tgt_lang=SCREAMING_SNAKE_CASE ) else: return super()._parse_and_tokenize(*SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : Union[str, Any] = super()._sanitize_parameters(**SCREAMING_SNAKE_CASE ) if src_lang is not None: A : Any = src_lang if tgt_lang is not None: A : Any = tgt_lang if src_lang is None and tgt_lang is None: # Backward compatibility, direct arguments use is preferred. A : List[str] = kwargs.get('''task''' , self.task ) A : str = task.split('''_''' ) if task and len(SCREAMING_SNAKE_CASE ) == 4: # translation, XX, to YY A : Union[str, Any] = items[1] A : Optional[Any] = items[3] return preprocess_params, forward_params, postprocess_params def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
368
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample Gaussian noise to begin the denoising loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
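A minimal usage sketch for the pipeline above. The checkpoint id "google/ddpm-cifar10-32" is an assumption on our part, chosen only to make the example concrete; any checkpoint with a compatible unet and scheduler should work the same way.

import torch
from diffusers import DDIMPipeline

# Assumed checkpoint id (not from the original record).
pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")

# eta=0.0 makes DDIM sampling deterministic for a fixed generator seed.
generator = torch.Generator(device="cpu").manual_seed(0)
images = pipe(batch_size=1, num_inference_steps=50, eta=0.0, generator=generator).images
images[0].save("ddim_sample.png")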
311
0
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Min-max normalization: rescale `data` to the [0, 1] range."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Z-score standardization: rescale `data` to zero mean and unit variance."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
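A short worked check of the two helpers above (the sample values and the expected outputs are our own illustration):

data = [2.0, 4.0, 6.0, 8.0, 10.0]

# Min-max maps the smallest value to 0 and the largest to 1.
assert normalization(data) == [0.0, 0.25, 0.5, 0.75, 1.0]

# Z-scores: mean(data) = 6.0 and stdev(data) ≈ 3.162 (sample standard
# deviation), so the result is symmetric around 0.
print(standardization(data))  # ≈ [-1.265, -0.632, 0.0, 0.632, 1.265]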
369
from __future__ import annotations

from random import random


class Node:
    """Treap node: stores a value and a random heap priority."""

    def __init__(self, value=None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value) -> tuple[Node | None, Node | None]:
    """Split the treap into (nodes <= value, nodes > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every value in `left` must not exceed any value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    inorder(root.left)
    print(root.value, end=",")
    inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good bye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
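A brief scripted example of the treap operations above (the inserted values are our own illustration). `inorder` prints the keys in sorted order because `split` and `merge` preserve the BST ordering on values, while the random priorities keep the tree balanced in expectation:

root = None
for v in [5, 3, 9, 3]:
    root = insert(root, v)
inorder(root)          # prints: 3,3,5,9,
print()
root = erase(root, 3)  # removes every node with value 3
inorder(root)          # prints: 5,9,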
311
0
import argparse
import re

import torch
from CLAP import create_model

from transformers import AutoFeatureExtractor, ClapConfig, ClapModel


KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}

    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")

        # Note: the original record tested `if "audio" and "qkv" in key`, where the
        # literal "audio" is always truthy; the intended check is on both substrings.
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()

    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
370
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A, A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A, A : Tuple = size, scale * w else: A, A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest 
edge A : Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A, A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
311
0
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets lowercase : List[Any] = '\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n' lowercase : List[str] = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n' lowercase : List[Any] = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n \'num_edits\' (int): The cumulative number of edits\n \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... 
case_sensitive=False)\n >>> print(results)\n {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A ( datasets.Metric ): def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install \"sacrebleu>=1.4.12\"`.''' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[ '''https://github.com/jhclark/tercom''', ] , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , ) -> int: """simple docstring""" A : Optional[Any] = len(references[0] ) if any(len(__lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) A : str = [[refs[i] for refs in references] for i in range(__lowerCAmelCase )] A : Any = TER( normalized=__lowerCAmelCase , no_punct=__lowerCAmelCase , asian_support=__lowerCAmelCase , case_sensitive=__lowerCAmelCase , ) A : Dict = sb_ter.corpus_score(__lowerCAmelCase , __lowerCAmelCase ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
371
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) lowercase : Tuple = parser.parse_args() lowercase : Union[str, Any] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
311
0
'''simple docstring''' import unittest from parameterized import parameterized from transformers import OpenLlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = parent A : List[Any] = batch_size A : int = seq_length A : Tuple = is_training A : int = use_input_mask A : Dict = use_token_type_ids A : Optional[int] = use_labels A : int = vocab_size A : Tuple = hidden_size A : Union[str, Any] = num_hidden_layers A : int = num_attention_heads A : List[Any] = intermediate_size A : str = hidden_act A : str = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : Dict = max_position_embeddings A : int = type_vocab_size A : int = type_sequence_label_size A : Optional[Any] = initializer_range A : Tuple = num_labels A : Any = num_choices A : str = scope def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_input_mask: A : Any = random_attention_mask([self.batch_size, self.seq_length] ) A : Tuple = None if self.use_token_type_ids: A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A : Dict = None A : Union[str, Any] = None A : Optional[Any] = None if self.use_labels: A : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : Any = ids_tensor([self.batch_size] , self.num_choices ) A : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return OpenLlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_A , initializer_range=self.initializer_range , use_stable_embedding=_A , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple 
docstring""" A : int = OpenLlamaModel(config=_A ) model.to(_A ) model.eval() A : List[Any] = model(_A , attention_mask=_A ) A : int = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : str = True A : Any = OpenLlamaModel(_A ) model.to(_A ) model.eval() A : Dict = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , ) A : List[str] = model( _A , attention_mask=_A , encoder_hidden_states=_A , ) A : List[str] = model(_A , attention_mask=_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : Dict = OpenLlamaForCausalLM(config=_A ) model.to(_A ) model.eval() A : int = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" A : str = True A : Tuple = True A : List[str] = OpenLlamaForCausalLM(config=_A ) model.to(_A ) model.eval() # first forward pass A : Tuple = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , use_cache=_A , ) A : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A : int = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A : Any = torch.cat([input_ids, next_tokens] , dim=-1 ) A : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) A : Any = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , output_hidden_states=_A , )['hidden_states'][0] A : Optional[Any] = model( _A , attention_mask=_A , encoder_hidden_states=_A , encoder_attention_mask=_A , past_key_values=_A , output_hidden_states=_A , )['hidden_states'][0] # select random slice A : str = ids_tensor((1,) , output_from_past.shape[-1] ).item() A : str = output_from_no_past[:, -3:, random_slice_idx].detach() A : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_A , _A , atol=1e-3 ) ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : int = self.prepare_config_and_inputs() ( A ) : int = config_and_inputs A : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): __magic_name__ = ( (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else () ) 
__magic_name__ = (OpenLlamaForCausalLM,) if is_torch_available() else () __magic_name__ = ( { "feature-extraction": OpenLlamaModel, "text-classification": OpenLlamaForSequenceClassification, "text-generation": OpenLlamaForCausalLM, "zero-shot": OpenLlamaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Tuple = OpenLlamaModelTester(self ) A : str = ConfigTester(self , config_class=_A , hidden_size=37 ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A : Any = type self.model_tester.create_and_check_model(*_A ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs_for_common() A : List[Any] = 3 A : List[str] = input_dict['input_ids'] A : List[str] = input_ids.ne(1 ).to(_A ) A : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A : str = OpenLlamaForSequenceClassification(_A ) model.to(_A ) model.eval() A : Optional[Any] = model(_A , attention_mask=_A , labels=_A ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs_for_common() A : List[str] = 3 A : List[Any] = 'single_label_classification' A : int = input_dict['input_ids'] A : str = input_ids.ne(1 ).to(_A ) A : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) A : str = OpenLlamaForSequenceClassification(_A ) model.to(_A ) model.eval() A : Union[str, Any] = model(_A , attention_mask=_A , labels=_A ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A : Optional[int] = 3 A : List[Any] = 'multi_label_classification' A : Union[str, Any] = input_dict['input_ids'] A : List[str] = input_ids.ne(1 ).to(_A ) A : str = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) A : Union[str, Any] = OpenLlamaForSequenceClassification(_A ) model.to(_A ) model.eval() A : Union[str, Any] = model(_A , attention_mask=_A , labels=_A ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" pass @parameterized.expand([('''linear''',), ('''dynamic''',)] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs_for_common() A : str = ids_tensor([1, 10] , config.vocab_size ) A : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same 
random weights A : List[Any] = OpenLlamaModel(_A ) original_model.to(_A ) original_model.eval() A : str = original_model(_A ).last_hidden_state A : List[Any] = original_model(_A ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A : Optional[Any] = {'type': scaling_type, 'factor': 10.0} A : Dict = OpenLlamaModel(_A ) scaled_model.to(_A ) scaled_model.eval() A : Dict = scaled_model(_A ).last_hidden_state A : str = scaled_model(_A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_A , _A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_A , _A , atol=1e-5 ) )
350
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase : str = datasets.utils.logging.get_logger(__name__) lowercase : Union[str, Any] = ['names', 'prefix'] lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowercase : List[Any] = ['encoding_errors', 'on_bad_lines'] lowercase : Any = ['date_format'] @dataclass class A ( datasets.BuilderConfig ): __magic_name__ = "," __magic_name__ = None __magic_name__ = "infer" __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = "." __magic_name__ = None __magic_name__ = '"' __magic_name__ = 0 __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = 0 __magic_name__ = True __magic_name__ = False __magic_name__ = None __magic_name__ = 10000 __magic_name__ = None __magic_name__ = "strict" __magic_name__ = "error" __magic_name__ = None def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if self.delimiter is not None: A : Optional[Any] = self.delimiter if self.column_names is not None: A : Optional[Any] = self.column_names @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == 
getattr(CsvConfig() , SCREAMING_SNAKE_CASE ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A ( datasets.ArrowBasedBuilder ): __magic_name__ = CsvConfig def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ): A : str = data_files if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = [files] A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A : Tuple = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[str] = [files] A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) ) return splits def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table: """simple docstring""" if self.config.features is not None: A : Optional[int] = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ): # cheaper cast A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return pa_table def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A : int = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ): A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ): A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE )}: {e}' ) 
raise
311
0
'''simple docstring'''


def exchange_sort(numbers: list[int]) -> list[int]:
    '''simple docstring'''
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                # Swap the out-of-order pair in place.
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(exchange_sort(unsorted))
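For illustration, a few quick checks of the sort above; the input lists are made up for this example and are not part of the sample:

# Illustrative usage of exchange_sort; the input values are hypothetical.
assert exchange_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert exchange_sort([]) == []
assert exchange_sort([-7, 2, 2, 0]) == [-7, 0, 2, 2]
# Note that the sort works in place and returns the same list object it was given.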
351
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : int = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class A ( __snake_case ): __magic_name__ = '''sew''' def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE ) A : Optional[Any] = hidden_size A : Any = feat_extract_norm A : Optional[int] = feat_extract_activation A : Tuple = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : int = conv_bias A : List[Any] = num_conv_pos_embeddings A : Tuple = num_conv_pos_embedding_groups A : int = len(self.conv_dim ) A : Dict = num_hidden_layers A : Optional[int] = intermediate_size A : Any = squeeze_factor A : int = hidden_act A : str = num_attention_heads A : Dict = hidden_dropout A : Optional[Any] = attention_dropout A : List[str] = activation_dropout A : Union[str, Any] = feat_proj_dropout A : Union[str, Any] = final_dropout A : int = layerdrop A : Optional[Any] = layer_norm_eps A : Any = initializer_range A : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : Optional[Any] = apply_spec_augment A : Optional[Any] = mask_time_prob A : Union[str, Any] = mask_time_length A : Optional[Any] = mask_time_min_masks A : str = mask_feature_prob A : Tuple = mask_feature_length A : Any = mask_feature_min_masks # ctc loss A : List[Any] = ctc_loss_reduction A : Dict = ctc_zero_infinity # sequence classification A : int = use_weighted_layer_sum A : Optional[int] = classifier_proj_size @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
311
0
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device


if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self) -> None:
        """simple docstring"""
        model = AutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''', return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')

        input_ids = tokenizer('''Hello there''', return_tensors='''pt''').input_ids
        labels = tokenizer('''Hi I am''', return_tensors='''pt''').input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
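One line in the test above is worth unpacking: the model returns the mean cross-entropy per label token, so multiplying by the number of label tokens and negating recovers the total log-likelihood of the labels, which is presumably how the expected score was produced. A small sketch of the arithmetic; the token count and loss value here are hypothetical, chosen only to reproduce the expected score:

# Hypothetical numbers that reproduce EXPECTED_SCORE; the real token count
# depends on the mT5 tokenizer and is not asserted here.
num_label_tokens = 5            # labels.shape[-1] for "Hi I am" (assumed)
mean_loss = 16.98254            # mean per-token cross-entropy (assumed)
mtf_score = -(num_label_tokens * mean_loss)
assert abs(mtf_score - (-84.9127)) < 1e-4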
352
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig() A : List[Any] = swin_name.split('''_''' ) A : Tuple = name_split[1] A : Union[str, Any] = int(name_split[4] ) A : str = int(name_split[3][-1] ) if model_size == "tiny": A : Optional[int] = 96 A : Optional[Any] = (2, 2, 6, 2) A : Any = (3, 6, 12, 24) elif model_size == "small": A : Optional[int] = 96 A : str = (2, 2, 18, 2) A : Tuple = (3, 6, 12, 24) elif model_size == "base": A : int = 128 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (4, 8, 16, 32) else: A : Dict = 192 A : Optional[Any] = (2, 2, 18, 2) A : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: A : Dict = 2_1841 else: A : str = 1000 A : List[str] = '''huggingface/label-files''' A : Any = '''imagenet-1k-id2label.json''' A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : str = {int(snake_case__ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Tuple = {v: k for k, v in idalabel.items()} A : Tuple = img_size A : Dict = num_classes A : Optional[Any] = embed_dim A : str = depths A : str = num_heads A : Optional[int] = window_size return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "patch_embed.proj" in name: A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: A : Optional[int] = '''encoder.''' + name if "attn.proj" in name: A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "norm.weight": A : Tuple = '''layernorm.weight''' if name == "norm.bias": A : Tuple = '''layernorm.bias''' if "head" in name: A : Any = name.replace('''head''' , '''classifier''' ) else: A : List[Any] = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A : Dict = key.split('''.''' ) A : Optional[int] = int(key_split[1] ) A : List[str] = int(key_split[3] ) A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : Any = val[:dim, :] A : Dict = val[ dim : dim * 2, : ] A : List[str] = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Any = val[ -dim: ] else: A : str = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A : Optional[Any] = get_swin_config(snake_case__ ) A : Optional[int] = SwinForImageClassification(snake_case__ ) model.eval() A : List[str] = 
convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) ) A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) A : Any = timm_model(inputs['''pixel_values'''] ) A : Optional[Any] = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
311
0
'''simple docstring''' import dataclasses import json import warnings from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging lowercase : Union[str, Any] = logging.get_logger(__name__) def lowerCAmelCase_ ( snake_case__=None , snake_case__=None ): '''simple docstring''' return field(default_factory=lambda: default , metadata=snake_case__ ) @dataclass class A : __magic_name__ = list_field( default=[] , metadata={ '''help''': ( '''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version''' ''' of all available models''' ) } , ) __magic_name__ = list_field( default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} ) __magic_name__ = list_field( default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , ) __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , ) __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , ) __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''Use FP16 to accelerate inference.'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''Benchmark training of model'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''Verbose memory tracing'''} ) __magic_name__ = field( default=__snake_case , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , ) __magic_name__ = field( default=__snake_case , metadata={ '''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory''' } , ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''Trace memory line by line'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''Save result to a CSV file'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''Save all print statements in a log file'''} ) __magic_name__ = field(default=__snake_case , metadata={'''help''': '''Whether to print environment information'''} ) __magic_name__ = field( default=__snake_case , metadata={ '''help''': ( '''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use''' ''' multiprocessing for accurate CPU and GPU memory measurements. 
This option should only be disabled''' ''' for debugging / testing and on TPU.''' ) } , ) __magic_name__ = field( default=F"inference_time_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , ) __magic_name__ = field( default=F"inference_memory_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , ) __magic_name__ = field( default=F"train_time_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , ) __magic_name__ = field( default=F"train_memory_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , ) __magic_name__ = field( default=F"env_info_{round(time() )}.csv" , metadata={'''help''': '''CSV filename used if saving environment information.'''} , ) __magic_name__ = field( default=F"log_{round(time() )}.csv" , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , ) __magic_name__ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} ) __magic_name__ = field( default=__snake_case , metadata={ '''help''': ( '''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain''' ''' model weights.''' ) } , ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" warnings.warn( F'The class {self.__class__} is deprecated. Hugging Face Benchmarking utils' ''' are deprecated in general and it is advised to use external Benchmarking libraries ''' ''' to benchmark Transformer models.''' , SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return json.dumps(dataclasses.asdict(self ) , indent=2 ) @property def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" if len(self.models ) <= 0: raise ValueError( '''Please make sure you provide at least one model name / model identifier, *e.g.* `--models''' ''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' ) return self.models @property def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" if not self.multi_process: return False elif self.is_tpu: logger.info('''Multiprocessing is currently not possible on TPU.''' ) return False else: return True
353
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Optional[int] = logging.get_logger(__name__) lowercase : Tuple = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class A ( __snake_case ): __magic_name__ = '''pix2struct_text_model''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : str = vocab_size A : List[str] = hidden_size A : List[Any] = d_kv A : Optional[Any] = d_ff A : Dict = num_layers A : Dict = num_heads A : Optional[int] = relative_attention_num_buckets A : Optional[Any] = relative_attention_max_distance A : Dict = dropout_rate A : Dict = layer_norm_epsilon A : Tuple = initializer_factor A : Union[str, Any] = use_cache A : int = eos_token_id A : List[str] = decoder_start_token_id # for backwards compatibility A : int = dense_act_fn super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Union[str, Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct_vision_model''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : List[str] = hidden_size A : Optional[Any] = patch_embed_hidden_size A : Union[str, Any] = d_ff A : Dict = dropout_rate A : str = num_hidden_layers A : Dict = num_attention_heads A : Tuple = initializer_range A : List[str] = initializer_factor A : Union[str, Any] = attention_dropout A : Tuple = layer_norm_eps A : int = dense_act_fn A : Optional[int] = seq_len A : Tuple = relative_attention_num_buckets A : str = relative_attention_max_distance A : Optional[Any] = d_kv @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Optional[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct''' __magic_name__ = True def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text_config is None: A : Dict = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: A : str = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE ) A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE ) A : Any = self.text_config.decoder_start_token_id A : Any = self.text_config.pad_token_id A : Dict = self.text_config.eos_token_id A : Union[str, Any] = initializer_factor A : Tuple = initializer_range A : Optional[Any] = self.initializer_range A : int = self.initializer_range A : Tuple = is_vqa @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = copy.deepcopy(self.__dict__ ) A : Dict = self.text_config.to_dict() A : int = self.vision_config.to_dict() A : Any = self.__class__.model_type return output
311
0
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
354
'''simple docstring'''
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    '''simple docstring'''
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            # i divides n: record the factor and keep dividing it out.
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
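A few illustrative checks of the trial-division routine above; the input values are examples only, not taken from the sample:

# Illustrative usage of prime_factors; the inputs are hypothetical.
assert prime_factors(12) == [2, 2, 3]   # 12 = 2 * 2 * 3
assert prime_factors(97) == [97]        # a prime is its own factorization
assert prime_factors(1) == []           # 1 has no prime factors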
311
0
import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None ): '''simple docstring''' assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' A : List[str] = nn.Parameter(snake_case__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' A : Union[str, Any] = nn.Parameter(snake_case__ ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : List[Any] = np.asarray(weights[0] ) A : Optional[Any] = np.asarray(weights[1] ) A : List[Any] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.output.dense , torch.tensor(snake_case__ ).view(-1 , snake_case__ ).contiguous().transpose(0 , 1 ) , ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : List[str] = np.asarray(weights[0] ) A : str = np.asarray(weights[1] ) A : List[Any] = np.asarray(weights[2] ) A : str = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(snake_case__ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case__ ) , ) set_param( torch_layer.output.dense , torch.tensor(snake_case__ ).view(-1 , snake_case__ ).contiguous().transpose(0 , 1 ) , ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : Dict = weights[0][0][0] A : str = np.asarray(layer_norm_a[0] ) A : Union[str, Any] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) , ) # lsh weights + output A : Optional[int] = weights[0][1] if len(snake_case__ ) < 4: set_layer_weights_in_torch_lsh(snake_case__ , torch_block.attention , snake_case__ ) else: set_layer_weights_in_torch_local(snake_case__ , torch_block.attention , snake_case__ ) # intermediate weighs A : Optional[int] = weights[2][0][1][2] # Chunked Feed Forward if len(snake_case__ ) == 4: A : Any = intermediate_weights[2] # layernorm 2 A : Optional[int] = np.asarray(intermediate_weights[0][0] ) A : List[str] = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) , ) # intermediate dense A : Any = np.asarray(intermediate_weights[1][0] ) A : Any = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(snake_case__ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case__ ) , ) # intermediate out A : List[str] = np.asarray(intermediate_weights[4][0] ) A : List[Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(snake_case__ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case__ ) , ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , 
snake_case__ ): '''simple docstring''' A : int = torch_model.reformer # word embeds A : str = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case__ ) , ) if isinstance(weights[3] , snake_case__ ): A : str = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): A : Tuple = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' A : List[str] = nn.Parameter(torch.tensor(snake_case__ ) ) A : Tuple = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( snake_case__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): A : List[Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(snake_case__ , snake_case__ , snake_case__ ) # output layer norm A : int = np.asarray(weights[7][0] ) A : str = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case__ ) , torch.tensor(snake_case__ ) , ) # output embeddings A : str = np.asarray(weights[9][0] ) A : List[str] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(snake_case__ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case__ ) , ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : int = ReformerConfig.from_json_file(snake_case__ ) print(F'Building PyTorch model from configuration: {config}' ) A : Any = ReformerModelWithLMHead(snake_case__ ) with open(snake_case__ , '''rb''' ) as f: A : Any = pickle.load(snake_case__ )['''weights'''] set_model_weights_in_torch(snake_case__ , snake_case__ , config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , snake_case__ ) if __name__ == "__main__": lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) lowercase : Optional[Any] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
355
'''simple docstring'''
# Function to print upper half of diamond (pyramid)


def floyd(n):
    '''simple docstring'''
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()


def reverse_floyd(n):
    '''simple docstring'''
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')


def pretty_print(n):
    '''simple docstring'''
    if n <= 0:
        print('''       ...       ....        nothing printing :(''')
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R'| /\ | |- | |- |--| |\ /| |-')
    print(R'|/ \| |- |_ |_ |__| | \/ | |_')

    K = 1
    while K:
        user_number = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        K = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
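To make the loops above concrete, this is the diamond the routine should draw for a small input, reconstructed by hand from the ranges (trailing spaces omitted):

# pretty_print(3) draws an upper pyramid followed by its mirror image:
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *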
311
0
'''simple docstring'''


def abbr(a: str, b: str) -> bool:
    '''simple docstring'''
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    # Uppercase a[i] to match b[j].
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    # Delete the lowercase character a[i].
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
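In the table above, dp[i][j] records whether the first i characters of a can be turned into the first j characters of b by uppercasing some lowercase letters and deleting the remaining lowercase ones. Two illustrative checks; the strings are examples, not from the sample:

# Illustrative usage of abbr; the strings are hypothetical.
assert abbr("daBcd", "ABC") is True    # a -> A, c -> C, both d's deleted
assert abbr("dBcd", "ABC") is False    # no way to produce the leading 'A'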
356
'''simple docstring''' # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" A : List[Any] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE , ) A : Optional[Any] = image.to(self.device ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE ), "This is a local test"
311
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
357
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : Any = parent A : List[Any] = batch_size A : Union[str, Any] = seq_length A : Any = is_training A : int = use_input_mask A : Union[str, Any] = vocab_size A : List[Any] = hidden_size A : List[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : str = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : int = max_position_embeddings A : Optional[int] = initializer_range A : Any = use_labels A : Optional[int] = scope def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ) : Any = self.prepare_config_and_inputs() A : Tuple = True A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : int = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) 
) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , ) A : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = True A : Tuple = True A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval() # first forward pass A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , ) A : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] A : Any = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] # select random slice A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() A : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A, A, A : Optional[int] = self.prepare_config_and_inputs() A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , __snake_case , 
unittest.TestCase ): __magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __magic_name__ = (BertGenerationDecoder,) if is_torch_available() else () __magic_name__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[str] = BertGenerationEncoderTester(self ) A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs() A : str = '''bert''' self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() A : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Dict = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Dict = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , 
SCREAMING_SNAKE_CASE ) A : Any = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
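# A minimal standalone sketch of the cache-consistency pattern the decoder test
# above exercises: logits computed with `past_key_values` must match a full
# recomputation. "sshleifer/tiny-gpt2" is an assumed small public checkpoint;
# any causal LM checkpoint works the same way.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained('sshleifer/tiny-gpt2')
model = AutoModelForCausalLM.from_pretrained('sshleifer/tiny-gpt2').eval()

ids = tok('hello world', return_tensors='pt').input_ids
with torch.no_grad():
    first = model(ids[:, :-1], use_cache=True)  # first pass builds the key/value cache
    cached = model(ids[:, -1:], past_key_values=first.past_key_values).logits
    full = model(ids).logits[:, -1:, :]  # same final step, recomputed without the cache

assert torch.allclose(cached, full, atol=1e-3)  # cached and uncached paths agree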
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_unispeech'] = [
        'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
        'UniSpeechForCTC',
        'UniSpeechForPreTraining',
        'UniSpeechForSequenceClassification',
        'UniSpeechModel',
        'UniSpeechPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
358
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ ) A : Any = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ ) class A ( __snake_case ): __magic_name__ = '''sigmoid''' __magic_name__ = '''softmax''' __magic_name__ = '''none''' @add_end_docstrings( __snake_case , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class A ( __snake_case ): __magic_name__ = False __magic_name__ = ClassificationFunction.NONE def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Optional[Any] = tokenizer_kwargs A : int = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: A : int = self.model.config.return_all_scores if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None: A : Union[str, Any] = top_k A : Dict = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , ) if return_all_scores: A : Optional[int] = None else: A : Dict = 1 if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Dict = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A : int = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
A : Any = '''top_k''' not in kwargs if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]: """simple docstring""" A : List[Any] = self.framework if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return self.model(**SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]: """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A : Optional[int] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A : Any = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: A : Optional[int] = self.model.config.function_to_apply else: A : Optional[int] = ClassificationFunction.NONE A : Any = model_outputs['''logits'''][0] A : List[Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A : int = sigmoid(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.SOFTMAX: A : Any = softmax(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.NONE: A : int = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A : int = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE ) ] if not _legacy: dict_scores.sort(key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE ) if top_k is not None: A : Union[str, Any] = dict_scores[:top_k] return dict_scores
311
0
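# A standalone sketch of the post-processing used by the text-classification
# pipeline above: subtracting the per-row maximum before exponentiation keeps
# np.exp from overflowing on large logits.
import numpy as np


def stable_softmax(logits: np.ndarray) -> np.ndarray:
    maxes = np.max(logits, axis=-1, keepdims=True)
    shifted = np.exp(logits - maxes)  # largest entry becomes exp(0) == 1
    return shifted / shifted.sum(axis=-1, keepdims=True)


print(stable_softmax(np.array([1000.0, 1001.0, 1002.0])))  # finite, sums to 1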
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    '''simple docstring'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    '''simple docstring'''
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
359
'''simple docstring'''
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    '''simple docstring'''
    url = f'https://www.amazon.in/laptop/s?k={product}'
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            'Product Title',
            'Product Link',
            'Current Price of the product',
            'Product Rating',
            'MRP of the product',
            'Discount',
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            'div', attrs={'class': 's-result-item', 'data-component-type': 's-search-result'},
        ),
        soup.find_all('div', attrs={'class': 'a-row a-size-base a-color-base'}),
    ):
        try:
            product_title = item.h2.text
            product_link = 'https://www.amazon.in/' + item.h2.a['href']
            product_price = item.find('span', attrs={'class': 'a-offscreen'}).text
            try:
                product_rating = item.find('span', attrs={'class': 'a-icon-alt'}).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span', attrs={'class': 'a-price a-text-price'}
                    ).text.split('₹')[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹').replace(',', ''))
                            - float(product_price.strip('₹').replace(',', ''))
                        )
                        / float(product_mrp.strip('₹').replace(',', ''))
                    )
                    * 100
                )
            except ValueError:
                discount = float('nan')
        except AttributeError:
            pass
        data_frame.loc[0] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # blank out cells for fields that could not be scraped
        data_frame.loc[data_frame['Current Price of the product'] == '', 'Current Price of the product'] = ' '
        data_frame.loc[data_frame['MRP of the product'] == '', 'MRP of the product'] = ' '
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = 'headphones'
    get_amazon_product_data(product).to_csv(f'Amazon Product Data for {product}.csv')
311
0
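# The parity rule behind check_circuit_or_path in the Euler-path record above,
# in isolation: a connected undirected graph has an Euler circuit iff every
# vertex has even degree, and an Euler path iff exactly two vertices have odd
# degree.
def classify_euler(graph: dict[int, list[int]]) -> str:
    odd = [v for v, adj in graph.items() if len(adj) % 2 == 1]
    if len(odd) == 0:
        return 'Euler circuit'
    if len(odd) == 2:
        return 'Euler path'
    return 'not Eulerian'


print(classify_euler({1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}))  # Euler path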
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    '''simple docstring'''
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    '''simple docstring'''
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    '''simple docstring'''
    try:
        nums = input('Enter two integers separated by comma (,): ').split(',')
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'greatest_common_divisor({num_1}, {num_2}) = '
            f'{greatest_common_divisor(num_1, num_2)}'
        )
        print(f'By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}')
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input')


if __name__ == "__main__":
    main()
360
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    '''simple docstring'''
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
311
0
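# A quick property check of the recursion used by the gcd record above:
# gcd(a, b) == gcd(b mod a, a), with gcd(0, b) == |b|. gcd_rec mirrors that
# logic and is compared against math.gcd on random inputs.
import math
from random import randint


def gcd_rec(a: int, b: int) -> int:
    return abs(b) if a == 0 else gcd_rec(b % a, a)


for _ in range(100):
    a, b = randint(0, 500), randint(0, 500)
    assert gcd_rec(a, b) == math.gcd(a, b)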
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: A : Tuple = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Optional[int] = TFAutoModel.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Union[str, Any] = AutoModel.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" for model_name in ["bert-base-uncased"]: A : List[str] = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Any = TFAutoModelForPreTraining.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : int = AutoModelForPreTraining.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Any = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Any = TFAutoModelForCausalLM.from_pretrained(__A , from_pt=__A ) A : Tuple = TFAutoModelForCausalLM.from_pretrained( __A , output_loading_info=__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Tuple = AutoModelForCausalLM.from_pretrained(__A , from_tf=__A ) A : Optional[Any] = AutoModelForCausalLM.from_pretrained( __A , output_loading_info=__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : str = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : str = TFAutoModelWithLMHead.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Optional[int] = 
AutoModelWithLMHead.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : Dict = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : int = TFAutoModelForMaskedLM.from_pretrained(__A , from_pt=__A ) A : str = TFAutoModelForMaskedLM.from_pretrained( __A , output_loading_info=__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : List[str] = AutoModelForMaskedLM.from_pretrained(__A , from_tf=__A ) A : Optional[int] = AutoModelForMaskedLM.from_pretrained( __A , output_loading_info=__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : str = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Dict = TFAutoModelForSeqaSeqLM.from_pretrained(__A , from_pt=__A ) A : Any = TFAutoModelForSeqaSeqLM.from_pretrained( __A , output_loading_info=__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Optional[Any] = AutoModelForSeqaSeqLM.from_pretrained(__A , from_tf=__A ) A : List[str] = AutoModelForSeqaSeqLM.from_pretrained( __A , output_loading_info=__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model_name in ["bert-base-uncased"]: A : Optional[Any] = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Tuple = TFAutoModelForSequenceClassification.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model_name in ["bert-base-uncased"]: A : List[str] = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : Tuple = TFAutoModelForQuestionAnswering.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) A : str = AutoModelForQuestionAnswering.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : List[Any] = TFAutoModelWithLMHead.from_pretrained(__A , from_pt=__A ) self.assertIsInstance(__A , __A ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=__A ) , 14410 ) A : Optional[Any] = AutoModelWithLMHead.from_pretrained(__A , from_tf=__A ) self.assertIsInstance(__A , __A ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=__A ) , 14410 ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = TFAutoModelWithLMHead.from_pretrained(__A , from_pt=__A ) self.assertIsInstance(__A , __A ) self.assertEqual(model.num_parameters() , 14410 ) self.assertEqual(model.num_parameters(only_trainable=__A ) , 14410 ) A : List[str] = AutoModelWithLMHead.from_pretrained(__A , from_tf=__A ) self.assertIsInstance(__A , __A ) self.assertEqual(model.num_parameters() , 14410 ) 
self.assertEqual(model.num_parameters(only_trainable=__A ) , 14410 )
361
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def test_custom_files_are_present(transformers_path):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
311
0
'''simple docstring''' import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowercase : List[Any] = { 'sample_size': 32, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': 10_00, 'block_out_channels': [32, 64], 'attention_head_dim': 8, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } lowercase : Any = { 'sample_size': 64, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 3, 'num_class_embeds': 10_00, 'block_out_channels': [1_92, 1_92 * 2, 1_92 * 3, 1_92 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } lowercase : str = { 'sample_size': 2_56, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': None, 'block_out_channels': [2_56, 2_56, 2_56 * 2, 2_56 * 2, 2_56 * 4, 2_56 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'default', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } lowercase : int = { 'num_train_timesteps': 40, 'sigma_min': 0.0_02, 'sigma_max': 80.0, } lowercase : List[str] = { 'num_train_timesteps': 2_01, 'sigma_min': 0.0_02, 'sigma_max': 80.0, } lowercase : Optional[Any] = { 'num_train_timesteps': 1_51, 'sigma_min': 0.0_02, 'sigma_max': 80.0, } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if isinstance(__snake_case , __snake_case ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=False ): '''simple docstring''' A : Optional[Any] = checkpoint[F'{old_prefix}.in_layers.0.weight'] A : List[str] = checkpoint[F'{old_prefix}.in_layers.0.bias'] A : Optional[Any] = checkpoint[F'{old_prefix}.in_layers.2.weight'] A : Optional[Any] = checkpoint[F'{old_prefix}.in_layers.2.bias'] A : List[str] = checkpoint[F'{old_prefix}.emb_layers.1.weight'] A : str = checkpoint[F'{old_prefix}.emb_layers.1.bias'] A : str = checkpoint[F'{old_prefix}.out_layers.0.weight'] A : Union[str, Any] = checkpoint[F'{old_prefix}.out_layers.0.bias'] A : Dict = checkpoint[F'{old_prefix}.out_layers.3.weight'] A : Dict = checkpoint[F'{old_prefix}.out_layers.3.bias'] if has_skip: A : int = checkpoint[F'{old_prefix}.skip_connection.weight'] A : Tuple = checkpoint[F'{old_prefix}.skip_connection.bias'] return new_checkpoint def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__=None ): '''simple docstring''' A, A, A : Tuple = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0 ) A, A, A : Tuple = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0 ) A : Union[str, Any] = 
checkpoint[F'{old_prefix}.norm.weight'] A : List[Any] = checkpoint[F'{old_prefix}.norm.bias'] A : List[Any] = weight_q.squeeze(-1 ).squeeze(-1 ) A : List[Any] = bias_q.squeeze(-1 ).squeeze(-1 ) A : str = weight_k.squeeze(-1 ).squeeze(-1 ) A : str = bias_k.squeeze(-1 ).squeeze(-1 ) A : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) A : Optional[Any] = bias_v.squeeze(-1 ).squeeze(-1 ) A : int = ( checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1 ).squeeze(-1 ) ) A : Optional[int] = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = torch.load(__snake_case , map_location='''cpu''' ) A : Dict = {} A : str = checkpoint['''time_embed.0.weight'''] A : Optional[int] = checkpoint['''time_embed.0.bias'''] A : Dict = checkpoint['''time_embed.2.weight'''] A : Any = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: A : Dict = checkpoint['''label_emb.weight'''] A : str = checkpoint['''input_blocks.0.0.weight'''] A : Any = checkpoint['''input_blocks.0.0.bias'''] A : Union[str, Any] = unet_config['''down_block_types'''] A : List[str] = unet_config['''layers_per_block'''] A : List[str] = unet_config['''attention_head_dim'''] A : Union[str, Any] = unet_config['''block_out_channels'''] A : Tuple = 1 A : str = channels_list[0] for i, layer_type in enumerate(__snake_case ): A : List[str] = channels_list[i] A : Optional[int] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(__snake_case ): A : Optional[int] = F'down_blocks.{i}.resnets.{j}' A : List[Any] = F'input_blocks.{current_layer}.0' A : Dict = True if j == 0 and downsample_block_has_skip else False A : int = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(__snake_case ): A : int = F'down_blocks.{i}.resnets.{j}' A : Union[str, Any] = F'input_blocks.{current_layer}.0' A : Optional[Any] = True if j == 0 and downsample_block_has_skip else False A : int = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case ) A : str = F'down_blocks.{i}.attentions.{j}' A : Dict = F'input_blocks.{current_layer}.1' A : List[Any] = convert_attention( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) current_layer += 1 if i != len(__snake_case ) - 1: A : List[Any] = F'down_blocks.{i}.downsamplers.0' A : List[str] = F'input_blocks.{current_layer}.0' A : Union[str, Any] = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) current_layer += 1 A : Optional[int] = current_channels # hardcoded the mid-block for now A : Tuple = '''mid_block.resnets.0''' A : Union[str, Any] = '''middle_block.0''' A : Union[str, Any] = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) A : Optional[Any] = '''mid_block.attentions.0''' A : str = '''middle_block.1''' A : int = convert_attention(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) A : Union[str, Any] = '''mid_block.resnets.1''' A : Tuple = '''middle_block.2''' A : Optional[int] = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) A : str = 0 A : Dict = unet_config['''up_block_types'''] for i, layer_type in enumerate(__snake_case ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): A : Union[str, Any] = F'up_blocks.{i}.resnets.{j}' 
A : Optional[Any] = F'output_blocks.{current_layer}.0' A : Dict = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case ) current_layer += 1 if i != len(__snake_case ) - 1: A : int = F'up_blocks.{i}.upsamplers.0' A : List[Any] = F'output_blocks.{current_layer-1}.1' A : int = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): A : Dict = F'up_blocks.{i}.resnets.{j}' A : List[Any] = F'output_blocks.{current_layer}.0' A : Dict = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case , has_skip=__snake_case ) A : Optional[int] = F'up_blocks.{i}.attentions.{j}' A : Union[str, Any] = F'output_blocks.{current_layer}.1' A : Optional[Any] = convert_attention( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) current_layer += 1 if i != len(__snake_case ) - 1: A : Dict = F'up_blocks.{i}.upsamplers.0' A : Dict = F'output_blocks.{current_layer-1}.2' A : Any = convert_resnet(__snake_case , __snake_case , __snake_case , __snake_case ) A : List[str] = checkpoint['''out.0.weight'''] A : int = checkpoint['''out.0.bias'''] A : Dict = checkpoint['''out.2.weight'''] A : Dict = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": lowercase : Any = argparse.ArgumentParser() parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.') parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.' ) parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.') lowercase : Optional[Any] = parser.parse_args() lowercase : Optional[Any] = strabool(args.class_cond) lowercase : str = os.path.basename(args.unet_path) print(f'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: lowercase : Optional[int] = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowercase : Tuple = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: lowercase : Optional[int] = TEST_UNET_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: lowercase : List[Any] = None lowercase : List[Any] = con_pt_to_diffuser(args.unet_path, unet_config) lowercase : Dict = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: lowercase : Tuple = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: lowercase : Union[str, Any] = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowercase : Optional[int] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'''Checkpoint type {ckpt_name} is not currently supported.''') lowercase : str = CMStochasticIterativeScheduler(**scheduler_config) lowercase : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
362
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]: """simple docstring""" A : List[str] = parent A : Optional[Any] = batch_size A : Tuple = image_size A : int = patch_size A : Optional[int] = num_channels A : str = is_training A : List[Any] = use_labels A : Any = hidden_size A : Any = num_hidden_layers A : Optional[int] = num_attention_heads A : Any = intermediate_size A : List[str] = hidden_act A : str = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Optional[int] = initializer_range A : Dict = scope A : Tuple = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[Any] = (image_size // patch_size) ** 2 A : Tuple = num_patches + 2 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Tuple = None if self.use_labels: A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Tuple = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE ) A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Tuple = 
TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE ) A : List[Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : Optional[int] = 1 A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE ) A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : str = self.type_sequence_label_size A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Optional[Any] = 1 A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.prepare_config_and_inputs() A, A, A : Tuple = config_and_inputs A : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = TFDeiTModelTester(self ) A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" pass def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A, A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) A : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple 
docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple: """simple docstring""" A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) A : Dict = self.default_image_processor A : List[str] = prepare_img() A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass A : Optional[int] = model(**SCREAMING_SNAKE_CASE ) # verify the logits A : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
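# A standalone sketch of the slow DeiT integration check above: one TF forward
# pass over a local image and a shape assertion on the ImageNet-1k logits.
# "cats.jpg" is a placeholder path for any local test image.
from PIL import Image
from transformers import DeiTImageProcessor, TFDeiTForImageClassificationWithTeacher

processor = DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
model = TFDeiTForImageClassificationWithTeacher.from_pretrained(
    'facebook/deit-base-distilled-patch16-224'
)
inputs = processor(images=Image.open('cats.jpg'), return_tensors='tf')
logits = model(**inputs).logits
assert logits.shape == (1, 1000)  # ImageNet-1k classification head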
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/dpr-ctx_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-single-nq-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-single-nq-base': (
        'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
    ),
    'facebook/dpr-ctx_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-question_encoder-multiset-base': (
        'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
    ),
    'facebook/dpr-reader-multiset-base': (
        'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
    ),
}


class DPRConfig(PretrainedConfig):
    model_type = 'dpr'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        projection_dim: int = 0,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
363
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
311
0
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_flax_available, is_torch_available, is_transformers_available, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .multicontrolnet import MultiControlNetModel from .pipeline_controlnet import StableDiffusionControlNetPipeline from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline if is_transformers_available() and is_flax_available(): from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
364
'''simple docstring''' from __future__ import annotations lowercase : Union[str, Any] = list[tuple[int, int]] lowercase : Optional[Any] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowercase : Any = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" A : int = pos_x A : Optional[Any] = pos_y A : Optional[Any] = (pos_y, pos_x) A : str = goal_x A : Optional[int] = goal_y A : List[Any] = g_cost A : str = parent A : str = self.calculate_heuristic() def __lowerCAmelCase ( self ) -> float: """simple docstring""" A : Optional[int] = abs(self.pos_x - self.goal_x ) A : Optional[Any] = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool: """simple docstring""" return self.f_cost < other.f_cost class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE ) A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE ) A : Optional[Any] = [self.start] A : list[Node] = [] A : Tuple = False def __lowerCAmelCase ( self ) -> Path | None: """simple docstring""" while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() A : Optional[int] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: A : Optional[int] = True return self.retrace_path(SCREAMING_SNAKE_CASE ) self.closed_nodes.append(SCREAMING_SNAKE_CASE ) A : Any = self.get_successors(SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(SCREAMING_SNAKE_CASE ) else: # retrieve the best current path A : str = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]: """simple docstring""" A : List[Any] = [] for action in delta: A : List[str] = parent.pos_x + action[1] A : Dict = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , SCREAMING_SNAKE_CASE , ) ) return successors def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path: """simple docstring""" A : int = node A : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) A : int = current_node.parent path.reverse() return path if __name__ == "__main__": lowercase : Tuple = (0, 0) lowercase : List[str] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print('------') lowercase : int = GreedyBestFirst(init, goal) lowercase : Union[str, Any] = greedy_bf.search() if path: for pos_x, pos_y in path: lowercase : Dict = 2 for elem in grid: print(elem)
311
0
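# The heuristic driving GreedyBestFirst above is plain Manhattan distance, shown
# standalone; the search orders its open list by this value (f_cost), which is
# what distinguishes greedy best-first from A*.
def manhattan(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])


assert manhattan((0, 0), (6, 6)) == 12  # start-to-goal estimate on the 7x7 demo grid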
'''simple docstring''' import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument lowercase : Any = { '/attention/': '/0/SelfAttention/', '/self_attention/': '/0/SelfAttention/', '/encoder_decoder_attention/': '/1/EncDecAttention/', 'value': 'v', 'query': 'q', 'key': 'k', 'out': 'o', 'pre_self_attention_layer_norm': '0/layer_norm', 'pre_cross_attention_layer_norm': '1/layer_norm', 'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong 'token_embedder': 'shared', 'encoder_norm': 'final_layer_norm', 'decoder_norm': 'final_layer_norm', 'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight', 'router/router_weights/w/': 'router/classifier/', 'roer/roer_weights/w/': 'router/classifier/', 'logits_dense': 'lm_head', } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : int = list(s_dict.keys() ) for key in keys: A : List[str] = R'.*/layers_(\d+)' A : Any = key if re.match(snake_case__ , snake_case__ ): A : str = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , snake_case__ ) A : List[Any] = R'(encoder|decoder)\/' if re.match(snake_case__ , snake_case__ ): A : Optional[Any] = re.match(snake_case__ , snake_case__ ).groups() if groups[0] == "encoder": A : Optional[int] = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , snake_case__ ) A : int = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , snake_case__ ) elif groups[0] == "decoder": A : Optional[int] = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , snake_case__ ) A : Optional[int] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , snake_case__ ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: A : List[str] = new_key.replace(snake_case__ , snake_case__ ) print(F'{key} -> {new_key}' ) A : Tuple = s_dict.pop(snake_case__ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: A : int = s_dict[ 'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: A : Tuple = s_dict[ 'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight' ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: A : Any = s_dict[key].shape[0] A : int = s_dict[key] for idx in range(snake_case__ ): A : Union[str, Any] = expert_weihts[idx] print(F'{key} -> {key.replace("expert/" , "nested fstring" )}' ) s_dict.pop(snake_case__ ) return s_dict lowercase : Dict = { 'NUM_ENCODER_LAYERS': 'num_layers', 'NUM_DECODER_LAYERS': 'num_decoder_layers', 'NUM_HEADS': 'num_heads', 'HEAD_DIM': 'd_kv', 'EMBED_DIM': 'd_model', 'MLP_DIM': 'd_ff', 'NUM_SELECTED_EXPERTS': 'num_selected_experts', 'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers', 'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers', 'dense.MlpBlock.activations': 'feed_forward_proj', } def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' import regex as re with open(snake_case__ , '''r''' ) as f: A : Dict = f.read() A : Tuple = re.findall(R'''(.*) = ([0-9.]*)''' , snake_case__ ) A : int = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": A : List[str] = float(snake_case__ ) if '.' in value else int(snake_case__ ) A : str = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , snake_case__ )[0] A : Dict = str(activation[1] ) A : Tuple = num_experts A : Dict = SwitchTransformersConfig(**snake_case__ ) return config def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__="./" , snake_case__=8 ): '''simple docstring''' print(F'Loading flax weights from : {flax_checkpoint_path}' ) A : int = checkpoints.load_tax_checkpoint(snake_case__ ) if gin_file is not None: A : List[Any] = convert_gin_to_config(snake_case__ , snake_case__ ) else: A : Dict = SwitchTransformersConfig.from_pretrained(snake_case__ ) A : Optional[Any] = SwitchTransformersForConditionalGeneration(snake_case__ ) A : Dict = flax_params['target'] A : List[Any] = flatten_dict(snake_case__ , sep='''/''' ) A : Optional[Any] = rename_keys(snake_case__ ) A : Optional[Any] = unflatten_dict(snake_case__ , sep='''/''' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ ) print(F'Save PyTorch model to {pytorch_dump_path}' ) pt_model.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the' ' model architecture. If not provided, a `gin_file` has to be provided.' ), ) parser.add_argument( '--gin_file', default=None, type=str, required=False, help='Path to the gin config file. If not provided, a `config_file` has to be passed ', ) parser.add_argument( '--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.' ) parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts') lowercase : Optional[int] = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
365
'''simple docstring''' import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py lowercase : Any = 'src/transformers' lowercase : str = 'docs/source/en/tasks' def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: A : Union[str, Any] = f.readlines() # Find the start prompt. A : List[Any] = 0 while not lines[start_index].startswith(snake_case__ ): start_index += 1 start_index += 1 A : List[str] = start_index while not lines[end_index].startswith(snake_case__ ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. lowercase : int = direct_transformers_import(TRANSFORMERS_PATH) lowercase : str = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
lowercase : Optional[int] = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : int = TASK_GUIDE_TO_MODELS[task_guide] A : List[str] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() ) A : Union[str, Any] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n" def lowerCAmelCase_ ( snake_case__ , snake_case__=False ): '''simple docstring''' A, A, A, A : Optional[int] = _find_text_in_file( filename=os.path.join(snake_case__ , snake_case__ ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , ) A : Optional[int] = get_model_list_for_task(snake_case__ ) if current_list != new_list: if overwrite: with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`' ''' to fix this.''' ) if __name__ == "__main__": lowercase : Dict = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowercase : List[Any] = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
311
0
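
The conversion sample above maps T5X/Flax parameter paths onto the Hugging Face layout through chained regex substitutions. A minimal sketch of that renaming step on a hypothetical checkpoint key (the key string is illustrative, not taken from a real checkpoint):

import re

key = "encoder/layers_3/mlp/wi/kernel"  # hypothetical T5X-style key, for illustration only
new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
if re.match(r"(encoder|decoder)/", new_key):
    new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)  # encoder MLP sits in sub-layer 1
print(new_key)  # encoder/block/3/layer/1/mlp/wi/kernel
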
'''simple docstring'''
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial, lowest-degree coefficient first, at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's scheme (one multiply-add per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
366
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of arr using Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even: swap arr[i] with arr[k - 1]
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd: swap arr[0] with arr[k - 1]
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
311
0
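
Both evaluators in the polynomial sample above compute the same value; Horner's rule replaces the explicit x**i powers with one multiply-add per coefficient. A quick sanity check under the sample's lowest-degree-first coefficient convention:

poly, x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
naive = sum(c * x**i for i, c in enumerate(poly))
result = 0.0
for coeff in reversed(poly):
    result = result * x + coeff
# 5*10**2 + 9.3*10**3 + 7*10**4 = 79800.0, up to float rounding
assert abs(naive - result) < 1e-9 and abs(result - 79800.0) < 1e-9
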
'''simple docstring'''
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
367
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class A ( __snake_case ): __magic_name__ = (UniPCMultistepScheduler,) __magic_name__ = (('''num_inference_steps''', 25),) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : str = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''solver_order''': 2, '''solver_type''': '''bh2''', } config.update(**SCREAMING_SNAKE_CASE ) return config def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : List[Any] = dict(self.forward_default_kwargs ) A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : int = 0.1 * sample A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order] A, A : Tuple = sample, sample for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ): A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Optional[Any] = dict(self.forward_default_kwargs ) A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) A : List[Any] = self.dummy_sample A : int = 0.1 * sample A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10] for scheduler_class in self.scheduler_classes: A : Optional[int] = self.get_scheduler_config() A : Any = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals (must be after setting timesteps) A : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(SCREAMING_SNAKE_CASE ) A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE ) # copy over dummy past residuals new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) # copy over dummy past residual (must be after setting timesteps) A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE 
).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" if scheduler is None: A : Dict = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : int = 10 A : Tuple = self.dummy_model() A : Any = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample return sample def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Tuple = dict(self.forward_default_kwargs ) A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE ) for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config() A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = self.dummy_sample A : Optional[int] = 0.1 * sample if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ): A : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10] A : List[str] = dummy_past_residuals[: scheduler.config.solver_order] A : List[Any] = scheduler.timesteps[5] A : Dict = scheduler.timesteps[6] A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() ) A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config ) A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config ) A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config ) A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE ) A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE ) for order in [1, 2, 3]: for solver_type in ["bh1", "bh2"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( 
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for solver_type in ["bh1", "bh2"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) A : Dict = self.full_loop( solver_order=SCREAMING_SNAKE_CASE , solver_type=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , ) assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers" def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = self.full_loop() A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.2_464 ) < 1e-3 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : List[Any] = self.full_loop(prediction_type='''v_prediction''' ) A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_mean.item() - 0.1_014 ) < 1e-3 def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = self.scheduler_classes[0] A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 ) A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = 10 A : Union[str, Any] = self.dummy_model() A : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for i, t in enumerate(scheduler.timesteps ): A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample assert sample.dtype == torch.floataa def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" for scheduler_class in self.scheduler_classes: A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(scheduler.config.num_train_timesteps ) assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
311
0
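
The scheduler test above pins down that a UniPCMultistepScheduler survives a save_config/from_pretrained round-trip and that its config can seed the other multistep solvers. A minimal sketch of the same round-trip outside the test harness (config values copied from the test's defaults):

import tempfile

from diffusers import DPMSolverMultistepScheduler, UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)  # writes scheduler_config.json
    reloaded = UniPCMultistepScheduler.from_pretrained(tmpdir)
assert reloaded.config.solver_type == "bh2"
dpm = DPMSolverMultistepScheduler.from_config(scheduler.config)  # cross-class conversion
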
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class A : def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return None class A : def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" return None class A ( unittest.TestCase ): __magic_name__ = [ # (model_name, model_kwargs) ("""bert-base-cased""", {}), ("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , '''tf''' , 12 , **__a ) @require_torch @slow def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , '''pt''' , 12 , **__a ) @require_torch @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" from transformers import BertModel A : Dict = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words'''] with NamedTemporaryFile(mode='''w+t''' ) as vocab_file: vocab_file.write('''\n'''.join(__a ) ) vocab_file.flush() A : List[Any] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: A : Optional[Any] = BertModel(BertConfig(vocab_size=len(__a ) ) ) model.save_pretrained(__a ) self._test_export(__a , '''pt''' , 12 , __a ) @require_tf @slow def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: A : Union[str, Any] = self._test_export(__a , '''tf''' , 12 , **__a ) A : Any = quantize(Path(__a ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) @require_torch @slow def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: A : Optional[int] = self._test_export(__a , '''pt''' , 12 , **__a ) A : Any = quantize(__a ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail('''Quantized model is bigger than initial ONNX model''' ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" try: # Compute path with TemporaryDirectory() as tempdir: A : Tuple = Path(__a ).joinpath('''model.onnx''' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__a , __a , __a , __a , __a , **__a ) return path except Exception as e: self.fail(__a ) @require_torch @require_tokenizers @slow def __lowerCAmelCase ( self ) -> int: """simple docstring""" from transformers import BertModel A : Union[str, Any] = BertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) A : Tuple = 
BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(__a , __a , '''pt''' ) @require_tf @require_tokenizers @slow def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" from transformers import TFBertModel A : List[str] = TFBertModel(BertConfig.from_pretrained('''lysandre/tiny-bert-random''' ) ) A : Union[str, Any] = BertTokenizerFast.from_pretrained('''lysandre/tiny-bert-random''' ) self._test_infer_dynamic_axis(__a , __a , '''tf''' ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : List[str] = FeatureExtractionPipeline(__a , __a ) A : Optional[Any] = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1'''] A, A, A, A : str = infer_shapes(__a , __a ) # Assert all variables are present self.assertEqual(len(__a ) , len(__a ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __a ) self.assertSequenceEqual(variable_names[3:] , __a ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: '''batch''', 1: '''sequence'''} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['''output_0'''] , {0: '''batch''', 1: '''sequence'''} ) self.assertDictEqual(shapes['''output_1'''] , {0: '''batch'''} ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : int = ['''input_ids''', '''attention_mask''', '''token_type_ids'''] A : Union[str, Any] = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]} A, A : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__a ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__a ) , set(__a ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__a , (tokens['''input_ids'''], tokens['''token_type_ids'''], tokens['''attention_mask''']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) A, A : str = ensure_valid_input(FuncNonContiguousArgs() , __a , __a ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__a ) , 1 ) self.assertEqual(len(__a ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens['''input_ids'''] ) self.assertEqual(ordered_input_names[0] , '''input_ids''' ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Dict = generate_identified_filename(Path('''/home/something/my_fake_model.onnx''' ) , '''-test''' ) self.assertEqual('''/home/something/my_fake_model-test.onnx''' , generated.as_posix() )
368
'''simple docstring''' from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM A : Dict = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE ): A : List[Any] = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: A : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size: raise ValueError( F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch' F' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) A : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A : Any = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A : int = self.scheduler.step( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample A : Dict = (image / 2 + 0.5).clamp(0 , 1 ) A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
311
0
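
The DDIM pipeline above is driven entirely by its __call__ arguments: eta interpolates between deterministic DDIM sampling (0.0) and DDPM-like noise injection (1.0), and num_inference_steps sets the scheduler's timestep grid. A hedged usage sketch (the checkpoint id is an assumption for illustration; any DDPM/DDIM-style unet repo should work):

import torch

from diffusers import DDIMPipeline

pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")  # assumed checkpoint, for illustration
generator = torch.Generator().manual_seed(0)
image = pipe(batch_size=1, generator=generator, eta=0.0, num_inference_steps=50).images[0]
image.save("ddim_sample.png")
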
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
369
'''simple docstring'''
from __future__ import annotations

from random import random


class Node:
    """Treap node: binary search tree by value, heap by random priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None):
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main():
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
311
0
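
In the treap sample, split and merge are the only structural primitives: insert splits the tree at the new value and merges the singleton node back in, while erase splits twice to cut out the equal-valued range. A short driver for the interactive grammar it parses (+N inserts, -N erases), using the descriptive names restored above:

root = None
root = interact_treap(root, "+1 +3 +5 +17 +19 +2 +16 +4 +0")
inorder(root)          # prints 0,1,2,3,4,5,16,17,19,
root = erase(root, 4)  # split below 4, split at 4, merge the outer halves back
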
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=33 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = parent A : str = batch_size A : str = seq_length A : List[str] = is_training A : List[str] = use_input_mask A : Tuple = use_token_type_ids A : str = use_labels A : List[Any] = vocab_size A : Any = hidden_size A : List[str] = num_hidden_layers A : Optional[int] = num_attention_heads A : Union[str, Any] = intermediate_size A : Optional[int] = hidden_act A : List[Any] = hidden_dropout_prob A : str = attention_probs_dropout_prob A : Tuple = max_position_embeddings A : Union[str, Any] = type_vocab_size A : List[str] = type_sequence_label_size A : List[Any] = initializer_range A : str = num_labels A : Dict = num_choices A : str = scope def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : List[Any] = None if self.use_input_mask: A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) A : str = None A : Dict = None A : Union[str, Any] = None if self.use_labels: A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A : List[str] = ids_tensor([self.batch_size] , self.num_choices ) A : List[str] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = EsmModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A : Optional[int] = 
model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) A : List[str] = model(UpperCamelCase__ ) A : Dict = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Tuple = EsmForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" A : Optional[int] = self.num_labels A : Tuple = EsmForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() A : int = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.prepare_config_and_inputs() ( A ) : int = config_and_inputs A : Dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = False __magic_name__ = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) __magic_name__ = () __magic_name__ = ( { """feature-extraction""": EsmModel, """fill-mask""": EsmForMaskedLM, """text-classification""": EsmForSequenceClassification, """token-classification""": EsmForTokenClassification, """zero-shot""": EsmForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : str = EsmModelTester(self ) A : str = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A : Dict = type self.model_tester.create_and_check_model(*UpperCamelCase__ ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ ) @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[str] = EsmModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def __lowerCAmelCase ( self ) -> int: 
"""simple docstring""" A : Optional[Any] = self.model_tester.prepare_config_and_inputs()[0] A : str = EsmEmbeddings(config=UpperCamelCase__ ) A : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) A : int = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) A : Tuple = create_position_ids_from_input_ids(UpperCamelCase__ , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(UpperCamelCase__ , UpperCamelCase__ ) ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs()[0] A : str = EsmEmbeddings(config=UpperCamelCase__ ) A : Optional[int] = torch.empty(2 , 4 , 30 ) A : Any = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] A : Any = torch.as_tensor([expected_single_positions, expected_single_positions] ) A : Tuple = embeddings.create_position_ids_from_inputs_embeds(UpperCamelCase__ ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(UpperCamelCase__ , UpperCamelCase__ ) ) ) @unittest.skip('''Esm does not support embedding resizing''' ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" pass @unittest.skip('''Esm does not support embedding resizing''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" pass @require_torch class A ( __snake_case ): @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): A : Any = EsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) model.eval() A : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] ) A : Optional[int] = model(UpperCamelCase__ )[0] A : str = 33 A : Any = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , UpperCamelCase__ ) A : Optional[int] = torch.tensor( [[[8.9_215, -10.5_898, -6.4_671], [-6.3_967, -13.9_114, -1.1_212], [-7.7_812, -13.9_516, -3.7_406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 ) ) @slow def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" with torch.no_grad(): A : str = EsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''' ) model.eval() A : List[str] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) A : List[Any] = model(UpperCamelCase__ )[0] # compare the actual values for a slice. A : Dict = torch.tensor( [[[0.1_444, 0.5_413, 0.3_248], [0.3_034, 0.0_053, 0.3_108], [0.3_228, -0.2_499, 0.3_415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
370
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A, A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A, A : Tuple = size, scale * w else: A, A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest 
edge A : Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A, A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
311
0
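
The ESM embedding test above fixes the contract of create_position_ids_from_input_ids: real tokens receive cumulative positions offset past padding_idx, while pad positions stay pinned at padding_idx. A minimal re-derivation of that contract (position_ids_sketch is a hypothetical helper mirroring the tensor values the test asserts):

import torch

def position_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # non-pad tokens count up from padding_idx + 1; pads keep padding_idx itself
    mask = input_ids.ne(padding_idx).int()
    return torch.cumsum(mask, dim=1) * mask + padding_idx

ids = torch.tensor([[12, 31, 13, 1]])           # 1 plays the padding index, as in the test
print(position_ids_sketch(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])
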
import unittest from diffusers.pipelines.pipeline_utils import is_safetensors_compatible class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__snake_case ) ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__snake_case ) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Dict = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', '''unet/diffusion_pytorch_model.bin''', # Removed: 'unet/diffusion_pytorch_model.safetensors', ] self.assertFalse(is_safetensors_compatible(__snake_case ) ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Union[str, Any] = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] self.assertTrue(is_safetensors_compatible(__snake_case ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = [ '''safety_checker/pytorch_model.bin''', '''safety_checker/model.safetensors''', '''vae/diffusion_pytorch_model.bin''', '''vae/diffusion_pytorch_model.safetensors''', '''text_encoder/pytorch_model.bin''', # Removed: 'text_encoder/model.safetensors', '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] self.assertFalse(is_safetensors_compatible(__snake_case ) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Union[str, Any] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] A : List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__snake_case , variant=__snake_case ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Dict = [ '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] A : List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__snake_case , variant=__snake_case ) ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = [ '''unet/diffusion_pytorch_model.bin''', '''unet/diffusion_pytorch_model.safetensors''', ] A : int = '''fp16''' self.assertTrue(is_safetensors_compatible(__snake_case , variant=__snake_case ) ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', '''unet/diffusion_pytorch_model.fp16.bin''', # Removed: 
'unet/diffusion_pytorch_model.fp16.safetensors', ] A : Union[str, Any] = '''fp16''' self.assertFalse(is_safetensors_compatible(__snake_case , variant=__snake_case ) ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = [ '''text_encoder/pytorch_model.fp16.bin''', '''text_encoder/model.fp16.safetensors''', ] A : List[Any] = '''fp16''' self.assertTrue(is_safetensors_compatible(__snake_case , variant=__snake_case ) ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Tuple = [ '''text_encoder/pytorch_model.bin''', '''text_encoder/model.safetensors''', ] A : List[str] = '''fp16''' self.assertTrue(is_safetensors_compatible(__snake_case , variant=__snake_case ) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[str] = [ '''safety_checker/pytorch_model.fp16.bin''', '''safety_checker/model.fp16.safetensors''', '''vae/diffusion_pytorch_model.fp16.bin''', '''vae/diffusion_pytorch_model.fp16.safetensors''', '''text_encoder/pytorch_model.fp16.bin''', # 'text_encoder/model.fp16.safetensors', '''unet/diffusion_pytorch_model.fp16.bin''', '''unet/diffusion_pytorch_model.fp16.safetensors''', ] A : int = '''fp16''' self.assertFalse(is_safetensors_compatible(__snake_case , variant=__snake_case ) )
371
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) lowercase : Tuple = parser.parse_args() lowercase : Union[str, Any] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
311
0
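
The safetensors test above enumerates the rule that is_safetensors_compatible enforces: every .bin weight file needs a safetensors sibling, with transformers-style pytorch_model.bin mapping to model.safetensors and diffusers weights keeping their stem. A rough reimplementation of that rule as the assertions pin it down (a sketch only; the real helper also takes the variant as a separate argument, whereas here it is baked into the filenames):

def roughly_safetensors_compatible(filenames: list[str]) -> bool:
    safetensors = {f for f in filenames if ".safetensors" in f}

    def expected(pt_name: str) -> str:
        folder, _, base = pt_name.rpartition("/")
        if base.startswith("pytorch_model"):
            base = base.replace("pytorch_model", "model")  # transformers naming convention
        return (folder + "/" if folder else "") + base.replace(".bin", ".safetensors")

    return all(expected(f) in safetensors for f in filenames if f.endswith(".bin"))
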
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
350
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase : str = datasets.utils.logging.get_logger(__name__) lowercase : Union[str, Any] = ['names', 'prefix'] lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowercase : List[Any] = ['encoding_errors', 'on_bad_lines'] lowercase : Any = ['date_format'] @dataclass class A ( datasets.BuilderConfig ): __magic_name__ = "," __magic_name__ = None __magic_name__ = "infer" __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = "." __magic_name__ = None __magic_name__ = '"' __magic_name__ = 0 __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = 0 __magic_name__ = True __magic_name__ = False __magic_name__ = None __magic_name__ = 10000 __magic_name__ = None __magic_name__ = "strict" __magic_name__ = "error" __magic_name__ = None def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if self.delimiter is not None: A : Optional[Any] = self.delimiter if self.column_names is not None: A : Optional[Any] = self.column_names @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == 
getattr(CsvConfig() , SCREAMING_SNAKE_CASE ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A ( datasets.ArrowBasedBuilder ): __magic_name__ = CsvConfig def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ): A : str = data_files if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = [files] A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A : Tuple = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[str] = [files] A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) ) return splits def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table: """simple docstring""" if self.config.features is not None: A : Optional[int] = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ): # cheaper cast A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return pa_table def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A : int = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ): A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ): A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE )}: {e}' ) 
raise
311
0
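A minimal, stdlib-only illustration of the parent-directory expansion that _get_dirs performs above with PurePosixPath (it registers a directory entry for every ancestor of each file, dropping the trailing "."):

from pathlib import PurePosixPath

# parents of 'data/train/part-0.parquet' are ('data/train', 'data', '.'); [:-1] drops '.'
parents = list(PurePosixPath("data/train/part-0.parquet").parents)[:-1]
print([str(p) for p in parents])  # ['data/train', 'data']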
from __future__ import annotations


class Node:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # in-order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    # every node has either zero or two children
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # main function for testing
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
351
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : int = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class A ( __snake_case ): __magic_name__ = '''sew''' def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE ) A : Optional[Any] = hidden_size A : Any = feat_extract_norm A : Optional[int] = feat_extract_activation A : Tuple = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : int = conv_bias A : List[Any] = num_conv_pos_embeddings A : Tuple = num_conv_pos_embedding_groups A : int = len(self.conv_dim ) A : Dict = num_hidden_layers A : Optional[int] = intermediate_size A : Any = squeeze_factor A : int = hidden_act A : str = num_attention_heads A : Dict = hidden_dropout A : Optional[Any] = attention_dropout A : List[str] = activation_dropout A : Union[str, Any] = feat_proj_dropout A : Union[str, Any] = final_dropout A : int = layerdrop A : Optional[Any] = layer_norm_eps A : Any = initializer_range A : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : Optional[Any] = apply_spec_augment A : Optional[Any] = mask_time_prob A : Union[str, Any] = mask_time_length A : Optional[Any] = mask_time_min_masks A : str = mask_feature_prob A : Tuple = mask_feature_length A : Any = mask_feature_min_masks # ctc loss A : List[Any] = ctc_loss_reduction A : Dict = ctc_zero_infinity # sequence classification A : int = use_weighted_layer_sum A : Optional[int] = classifier_proj_size @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
311
0
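A quick sanity check of the binary-tree helpers above, assuming Node, depth_of_tree and is_full_binary_tree are in scope as defined in that snippet:

root = Node(1)
root.left, root.right = Node(2), Node(3)
print(depth_of_tree(root))        # 2
print(is_full_binary_tree(root))  # True: every node has zero or two children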
def solution(max_perimeter: int = 10**9) -> int:
    i = 1
    value = 2
    perimeter = 0
    perimeters_sum = 0
    prev_value = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
352
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig() A : List[Any] = swin_name.split('''_''' ) A : Tuple = name_split[1] A : Union[str, Any] = int(name_split[4] ) A : str = int(name_split[3][-1] ) if model_size == "tiny": A : Optional[int] = 96 A : Optional[Any] = (2, 2, 6, 2) A : Any = (3, 6, 12, 24) elif model_size == "small": A : Optional[int] = 96 A : str = (2, 2, 18, 2) A : Tuple = (3, 6, 12, 24) elif model_size == "base": A : int = 128 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (4, 8, 16, 32) else: A : Dict = 192 A : Optional[Any] = (2, 2, 18, 2) A : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: A : Dict = 2_1841 else: A : str = 1000 A : List[str] = '''huggingface/label-files''' A : Any = '''imagenet-1k-id2label.json''' A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : str = {int(snake_case__ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Tuple = {v: k for k, v in idalabel.items()} A : Tuple = img_size A : Dict = num_classes A : Optional[Any] = embed_dim A : str = depths A : str = num_heads A : Optional[int] = window_size return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "patch_embed.proj" in name: A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: A : Optional[int] = '''encoder.''' + name if "attn.proj" in name: A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "norm.weight": A : Tuple = '''layernorm.weight''' if name == "norm.bias": A : Tuple = '''layernorm.bias''' if "head" in name: A : Any = name.replace('''head''' , '''classifier''' ) else: A : List[Any] = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A : Dict = key.split('''.''' ) A : Optional[int] = int(key_split[1] ) A : List[str] = int(key_split[3] ) A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : Any = val[:dim, :] A : Dict = val[ dim : dim * 2, : ] A : List[str] = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Any = val[ -dim: ] else: A : str = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A : Optional[Any] = get_swin_config(snake_case__ ) A : Optional[int] = SwinForImageClassification(snake_case__ ) model.eval() A : List[str] = 
convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) ) A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) A : Any = timm_model(inputs['''pixel_values'''] ) A : Optional[Any] = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
311
0
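The model-size branching in the Swin conversion above can be read as a lookup table of (embed_dim, depths, num_heads); the values below are copied verbatim from that snippet, and the "large" key is an assumed name for its unnamed else-branch:

SWIN_SIZES = {
    "tiny":  (96,  (2, 2, 6, 2),  (3, 6, 12, 24)),
    "small": (96,  (2, 2, 18, 2), (3, 6, 12, 24)),
    "base":  (128, (2, 2, 18, 2), (4, 8, 16, 32)),
    "large": (192, (2, 2, 18, 2), (6, 12, 24, 48)),
}
embed_dim, depths, num_heads = SWIN_SIZES["tiny"]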
'''simple docstring''' import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowercase : List[Any] = get_logger(__name__) class A : __magic_name__ = '''dummy_data''' __magic_name__ = '''datasets''' __magic_name__ = False def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , ) -> Tuple: """simple docstring""" A : Dict = 0 A : Optional[int] = dataset_name A : int = cache_dir A : Tuple = use_local_dummy_data A : Tuple = config # download_callbacks take a single url as input A : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root A : Dict = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general A : Tuple = str(SCREAMING_SNAKE_CASE ) # to be downloaded A : Any = None A : str = None @property def __lowerCAmelCase ( self ) -> Any: """simple docstring""" if self._dummy_file is None: A : List[Any] = self.download_dummy_data() return self._dummy_file @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join('''dummy''' , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join('''dummy''' , self.version_name ) @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : List[str] = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) A : int = cached_path( SCREAMING_SNAKE_CASE , cache_dir=self.cache_dir , extract_compressed_file=SCREAMING_SNAKE_CASE , force_extract=SCREAMING_SNAKE_CASE ) return os.path.join(SCREAMING_SNAKE_CASE , self.dummy_file_name ) @property def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" if self._bucket_url is None: A : Optional[int] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) ) return self._bucket_url @property def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested A : Union[str, Any] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned A : List[Any] = self.dummy_file_name # special case when data_url is a dict if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return self.create_dummy_data_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , (list, tuple) ): return self.create_dummy_data_list(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else: return self.create_dummy_data_single(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return self.download_and_extract(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" return self.download_and_extract(SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return path def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return {} def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Dict = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): for single_url in single_urls: download_callback(SCREAMING_SNAKE_CASE ) else: A : Optional[int] = single_urls download_callback(SCREAMING_SNAKE_CASE ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Any = [os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE ).name ) ) for x in single_urls] else: A : str = single_urls A : str = os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(Path(SCREAMING_SNAKE_CASE ).name ) ) A : int = value # make sure that values are unique if all(isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique A : List[str] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Dict = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one A : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , SCREAMING_SNAKE_CASE ) ) for url in data_url ) A : Any = all( url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): A : Optional[Any] = [data_url[0]] * len(SCREAMING_SNAKE_CASE ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(SCREAMING_SNAKE_CASE ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus A : int = 
os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) ) dummy_data_list.append(SCREAMING_SNAKE_CASE ) return dummy_data_list def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" for download_callback in self.download_callbacks: download_callback(SCREAMING_SNAKE_CASE ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus A : Dict = os.path.join(SCREAMING_SNAKE_CASE , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) ) if os.path.exists(SCREAMING_SNAKE_CASE ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" pass def __lowerCAmelCase ( self ) -> str: """simple docstring""" pass def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" def _iter_archive_members(SCREAMING_SNAKE_CASE ): # this preserves the order of the members inside the ZIP archive A : Optional[int] = Path(self.dummy_file ).parent A : str = path.relative_to(SCREAMING_SNAKE_CASE ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: A : Optional[Any] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(SCREAMING_SNAKE_CASE ) A : Union[str, Any] = Path(SCREAMING_SNAKE_CASE ) A : List[Any] = _iter_archive_members(SCREAMING_SNAKE_CASE ) if self.use_local_dummy_data else path.rglob('''*''' ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ): yield file_path.relative_to(SCREAMING_SNAKE_CASE ).as_posix(), file_path.open('''rb''' ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[Any] = [paths] for path in paths: if os.path.isfile(SCREAMING_SNAKE_CASE ): if os.path.basename(SCREAMING_SNAKE_CASE ).startswith(('''.''', '''__''') ): return yield path else: for dirpath, dirnames, filenames in os.walk(SCREAMING_SNAKE_CASE ): if os.path.basename(SCREAMING_SNAKE_CASE ).startswith(('''.''', '''__''') ): continue dirnames.sort() for filename in sorted(SCREAMING_SNAKE_CASE ): if filename.startswith(('''.''', '''__''') ): continue yield os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
353
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Optional[int] = logging.get_logger(__name__) lowercase : Tuple = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class A ( __snake_case ): __magic_name__ = '''pix2struct_text_model''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : str = vocab_size A : List[str] = hidden_size A : List[Any] = d_kv A : Optional[Any] = d_ff A : Dict = num_layers A : Dict = num_heads A : Optional[int] = relative_attention_num_buckets A : Optional[Any] = relative_attention_max_distance A : Dict = dropout_rate A : Dict = layer_norm_epsilon A : Tuple = initializer_factor A : Union[str, Any] = use_cache A : int = eos_token_id A : List[str] = decoder_start_token_id # for backwards compatibility A : int = dense_act_fn super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Union[str, Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct_vision_model''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : List[str] = hidden_size A : Optional[Any] = patch_embed_hidden_size A : Union[str, Any] = d_ff A : Dict = dropout_rate A : str = num_hidden_layers A : Dict = num_attention_heads A : Tuple = initializer_range A : List[str] = initializer_factor A : Union[str, Any] = attention_dropout A : Tuple = layer_norm_eps A : int = dense_act_fn A : Optional[int] = seq_len A : Tuple = relative_attention_num_buckets A : str = relative_attention_max_distance A : Optional[Any] = d_kv @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Optional[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct''' __magic_name__ = True def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text_config is None: A : Dict = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: A : str = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE ) A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE ) A : Any = self.text_config.decoder_start_token_id A : Any = self.text_config.pad_token_id A : Dict = self.text_config.eos_token_id A : Union[str, Any] = initializer_factor A : Tuple = initializer_range A : Optional[Any] = self.initializer_range A : int = self.initializer_range A : Tuple = is_vqa @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = copy.deepcopy(self.__dict__ ) A : Dict = self.text_config.to_dict() A : int = self.vision_config.to_dict() A : Any = self.__class__.model_type return output
311
0
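The dummy-data manager above repeatedly derives local file names from URLs via urllib.parse.quote_plus on the last path segment; a stdlib-only illustration of that mapping (the URL is a made-up example):

import urllib.parse

url = "https://example.com/data/train.csv?rev=2"
print(urllib.parse.quote_plus(url.split("/")[-1]))  # train.csv%3Frev%3D2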
'''simple docstring''' import argparse import copy def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : List[Any] = {} with open(snake_case__ ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: A : List[str] = [] _list.append([line.split()[1], line.split()[2]] ) A : Any = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: A : Optional[int] = [] _list.append([line.split()[0], line.split()[2]] ) A : int = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' with open(snake_case__ ) as f: A : Tuple = f.read(1 ) A : Any = start_node A : List[Any] = [] A : List[str] = start_node A : Union[str, Any] = 0 while visiting not in first_solution: A : Optional[Any] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(snake_case__ ) and k[0] not in first_solution: A : List[str] = k[1] A : int = k[0] first_solution.append(snake_case__ ) A : List[str] = distance_of_first_solution + int(snake_case__ ) A : Any = best_node first_solution.append(snake_case__ ) A : Optional[int] = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 A : Tuple = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Optional[Any] = [] for n in solution[1:-1]: A : Union[str, Any] = solution.index(snake_case__ ) for kn in solution[1:-1]: A : str = solution.index(snake_case__ ) if n == kn: continue A : Tuple = copy.deepcopy(snake_case__ ) A : str = kn A : Union[str, Any] = n A : Optional[int] = 0 for k in _tmp[:-1]: A : List[str] = _tmp[_tmp.index(snake_case__ ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: A : List[str] = distance + int(i[1] ) _tmp.append(snake_case__ ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) A : Dict = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda snake_case__ : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' A : str = 1 A : str = first_solution A : Any = [] A : Optional[Any] = distance_of_first_solution A : Optional[int] = solution while count <= iters: A : List[Any] = find_neighborhood(snake_case__ , snake_case__ ) A : Any = 0 A : str = neighborhood[index_of_best_solution] A : List[Any] = len(snake_case__ ) - 1 A : List[Any] = False while not found: A : Dict = 0 while i < len(snake_case__ ): if best_solution[i] != solution[i]: A : List[str] = best_solution[i] A : Optional[Any] = solution[i] break A : str = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) A : List[str] = True A : Optional[int] = best_solution[:-1] A : Optional[int] = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: A : Optional[Any] = cost A : int = solution else: A : str = index_of_best_solution + 1 A : Optional[Any] = neighborhood[index_of_best_solution] if len(snake_case__ ) >= size: tabu_list.pop(0 ) A : List[Any] = count + 1 return best_solution_ever, best_cost 
def lowerCAmelCase_ ( snake_case__=None ): '''simple docstring''' A : Union[str, Any] = generate_neighbours(args.File ) A : List[Any] = generate_first_solution( args.File , snake_case__ ) A : List[str] = tabu_search( snake_case__ , snake_case__ , snake_case__ , args.Iterations , args.Size , ) print(F'Best solution: {best_sol}, with total distance: {best_cost}.' ) if __name__ == "__main__": lowercase : Tuple = argparse.ArgumentParser(description='Tabu Search') parser.add_argument( '-f', '--File', type=str, help='Path to the file containing the data', required=True, ) parser.add_argument( '-i', '--Iterations', type=int, help='How many iterations the algorithm should perform', required=True, ) parser.add_argument( '-s', '--Size', type=int, help='Size of the tabu list', required=True ) # Pass the arguments to main method main(parser.parse_args())
354
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
311
0
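A one-line check of the prime_factors helper defined above:

print(prime_factors(360))  # [2, 2, 2, 3, 3, 5]
print(prime_factors(97))   # [97] -- a prime is its own sole factor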
import torch from diffusers import CMStochasticIterativeScheduler from .test_schedulers import SchedulerCommonTest class A ( __snake_case ): __magic_name__ = (CMStochasticIterativeScheduler,) __magic_name__ = 10 def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : Optional[Any] = { '''num_train_timesteps''': 201, '''sigma_min''': 0.002, '''sigma_max''': 80.0, } config.update(**SCREAMING_SNAKE_CASE ) return config def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Optional[Any] = 10 A : Union[str, Any] = self.get_scheduler_config() A : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) A : List[Any] = scheduler.timesteps[0] A : List[Any] = scheduler.timesteps[1] A : List[str] = self.dummy_sample A : List[str] = 0.1 * sample A : List[str] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample A : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" for clip_denoised in [True, False]: self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = self.scheduler_classes[0] A : int = self.get_scheduler_config() A : int = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = 1 scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) A : int = scheduler.timesteps A : int = torch.manual_seed(0 ) A : Optional[int] = self.dummy_model() A : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma for i, t in enumerate(SCREAMING_SNAKE_CASE ): # 1. scale model input A : int = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 2. predict noise residual A : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 3. predict previous sample x_t-1 A : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample A : Any = pred_prev_sample A : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) A : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 192.7_614 ) < 1e-2 assert abs(result_mean.item() - 0.2_510 ) < 1e-3 def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = self.scheduler_classes[0] A : Tuple = self.get_scheduler_config() A : int = scheduler_class(**SCREAMING_SNAKE_CASE ) A : int = [106, 0] scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE ) A : str = scheduler.timesteps A : Any = torch.manual_seed(0 ) A : Optional[Any] = self.dummy_model() A : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma for t in timesteps: # 1. scale model input A : List[str] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 2. predict noise residual A : Tuple = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # 3. 
predict previous sample x_t-1 A : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = pred_prev_sample A : Dict = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) A : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 347.6_357 ) < 1e-2 assert abs(result_mean.item() - 0.4_527 ) < 1e-3 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : str = self.scheduler_classes[0] A : Any = self.get_scheduler_config() A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE ) A : str = [39, 30, 12, 15, 0] with self.assertRaises(SCREAMING_SNAKE_CASE , msg='''`timesteps` must be in descending order.''' ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Tuple = self.scheduler_classes[0] A : Dict = self.get_scheduler_config() A : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Optional[int] = [39, 30, 12, 1, 0] A : Optional[int] = len(SCREAMING_SNAKE_CASE ) with self.assertRaises(SCREAMING_SNAKE_CASE , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ): scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : List[str] = self.scheduler_classes[0] A : List[str] = self.get_scheduler_config() A : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : Tuple = [scheduler.config.num_train_timesteps] with self.assertRaises( SCREAMING_SNAKE_CASE , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ): scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
355
'''simple docstring''' # Function to print upper half of diamond (pyramid) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' for i in range(0 , snake_case__ ): for _ in range(0 , n - i - 1 ): # printing spaces print(''' ''' , end='''''' ) for _ in range(0 , i + 1 ): # printing stars print('''* ''' , end='''''' ) print() def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' for i in range(snake_case__ , 0 , -1 ): for _ in range(snake_case__ , 0 , -1 ): # printing stars print('''* ''' , end='''''' ) print() for _ in range(n - i + 1 , 0 , -1 ): # printing spaces print(''' ''' , end='''''' ) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if n <= 0: print(''' ... .... nothing printing :(''' ) return floyd(snake_case__ ) # upper half reverse_floyd(snake_case__ ) # lower half if __name__ == "__main__": print(R'| /\ | |- | |- |--| |\ /| |-') print(R'|/ \| |- |_ |_ |__| | \/ | |_') lowercase : List[str] = 1 while K: lowercase : List[Any] = int(input('enter the number and , and see the magic : ')) print() pretty_print(user_number) lowercase : Any = int(input('press 0 to exit... and 1 to continue...')) print('Good Bye...')
311
0
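A hedged sketch of the timestep validation those scheduler tests exercise; the constructor kwargs are copied from get_scheduler_config above, and this is an illustration rather than a full denoising loop:

from diffusers import CMStochasticIterativeScheduler

sched = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
sched.set_timesteps(timesteps=[106, 0])    # accepted: strictly descending, below num_train_timesteps
# sched.set_timesteps(timesteps=[0, 106])  # would raise: timesteps must be in descending order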
def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
356
'''simple docstring''' # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers import DiffusionPipeline, ImagePipelineOutput class A ( __snake_case ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" super().__init__() self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]: """simple docstring""" A : List[Any] = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=SCREAMING_SNAKE_CASE , ) A : Optional[Any] = image.to(self.device ) # set step values self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE ) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE ), "This is a local test"
311
0
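The Gregorian leap-year rule embedded in the Sunday-counting loop above, pulled out as a standalone predicate for clarity:

def is_leap(year: int) -> bool:
    return (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)

assert is_leap(2000) and is_leap(1904) and not is_leap(1900)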
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the empirical Harris constant, conventionally in [0.04, 0.06]
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                # structure-tensor sums over the local window
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
357
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : Any = parent A : List[Any] = batch_size A : Union[str, Any] = seq_length A : Any = is_training A : int = use_input_mask A : Union[str, Any] = vocab_size A : List[Any] = hidden_size A : List[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : str = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : int = max_position_embeddings A : Optional[int] = initializer_range A : Any = use_labels A : Optional[int] = scope def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ) : Any = self.prepare_config_and_inputs() A : Tuple = True A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : int = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) 
        )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
        """simple docstring"""
        A : List[str] = True
        A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : str = model(
            SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , )
        A : List[Any] = model(
            SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]:
        """simple docstring"""
        A : Optional[Any] = True
        A : Tuple = True
        A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()

        # first forward pass
        A : str = model(
            SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , )
        A : Optional[int] = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )

        # append to next input_ids and
        A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
        A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )

        A : str = model(
            SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]
        A : Any = model(
            SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0]

        # select random slice
        A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
        A : str = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any:
        """simple docstring"""
        A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        A, A, A, A : Optional[int] = self.prepare_config_and_inputs()
        A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
    __magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    __magic_name__ = (BertGenerationDecoder,) if is_torch_available() else ()
    __magic_name__ = (
        {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        A : List[str] = BertGenerationEncoderTester(self )
        A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        A : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Any:
        """simple docstring"""
        A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs()
        A : str = '''bert'''
        self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A : int = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        (
            ( A ),
            ( A ),
            ( A ),
            ( A ),
            ( A ),
            ( A ),
        ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()

        A : Union[str, Any] = None
        self.model_tester.create_and_check_model_as_decoder(
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
            SCREAMING_SNAKE_CASE ,
        )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE )

    @slow
    def __lowerCAmelCase ( self ) -> Any:
        """simple docstring"""
        A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
        self.assertIsNotNone(SCREAMING_SNAKE_CASE )


@require_torch
class A ( unittest.TestCase ):
    @slow
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
        A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            A : Dict = model(SCREAMING_SNAKE_CASE )[0]
        A : Optional[Any] = torch.Size([1, 8, 1024] )
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
        A : Dict = torch.tensor(
            [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )


@require_torch
class A ( unittest.TestCase ):
    @slow
    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
        A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
        with torch.no_grad():
            A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0]
        A : Optional[Any] = torch.Size([1, 8, 50358] )
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
        A : Any = torch.tensor(
            [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
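# Note on the decoder test above: it verifies that a forward pass that reuses
# `past_key_values` matches an uncached pass on a shared output slice. A minimal,
# self-contained sketch of that comparison pattern using plain torch tensors
# (the `fake_step` helper is hypothetical, purely for illustration, not part of
# the file above):
import torch

def fake_step(ids: torch.Tensor) -> torch.Tensor:
    # stand-in for a model forward pass: deterministic "hidden states" per position
    return torch.cumsum(ids.float().unsqueeze(-1).expand(-1, -1, 4), dim=1)

ids = torch.arange(10).reshape(1, 10)
full = fake_step(ids)                # "no past" pass over all tokens
cached = fake_step(ids)[:, -3:, :]   # pretend the last 3 steps came from a cached pass
assert torch.allclose(full[:, -3:, :], cached, atol=1e-3)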
'''simple docstring'''

import re

from ..utils import cached_file


# docstyle-ignore
lowercase : List[str] = '\nHuman: <<task>>\n\nAssistant: '

lowercase : int = 'huggingface-tools/default-prompts'
lowercase : Any = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}


def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__="run" ):
    '''simple docstring'''
    if prompt_or_repo_id is None:
        A : Dict = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('''\\s''' , snake_case__ ) is not None:
        return prompt_or_repo_id

    A : Optional[Any] = cached_file(
        snake_case__ , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
    with open(snake_case__ , '''r''' , encoding='''utf-8''' ) as f:
        return f.read()
358
'''simple docstring'''

import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline


if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    return 1.0 / (1.0 + np.exp(-_outputs ))


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ )
    A : Any = np.exp(_outputs - maxes )
    return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ )


class A ( __snake_case ):
    __magic_name__ = '''sigmoid'''
    __magic_name__ = '''softmax'''
    __magic_name__ = '''none'''


@add_end_docstrings(
    __snake_case , R'''
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    ''' , )
class A ( __snake_case ):
    __magic_name__ = False
    __magic_name__ = ClassificationFunction.NONE

    def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """simple docstring"""
        super().__init__(**SCREAMING_SNAKE_CASE )

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == '''tf'''
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict:
        """simple docstring"""
        A : Optional[Any] = tokenizer_kwargs

        A : int = {}
        if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
            A : int = self.model.config.return_all_scores

        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None:
            A : Union[str, Any] = top_k
            A : Dict = False
        elif return_all_scores is not None:
            warnings.warn(
                '''`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of'''
                ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , )
            if return_all_scores:
                A : Optional[int] = None
            else:
                A : Dict = 1

        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            A : Dict = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            A : int = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        A : Any = '''top_k''' not in kwargs
        if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]:
        """simple docstring"""
        A : List[Any] = self.framework
        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
        elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
                ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' )
        return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """simple docstring"""
        return self.model(**SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]:
        """simple docstring"""
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                A : Optional[int] = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                A : Any = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
                A : Optional[int] = self.model.config.function_to_apply
            else:
                A : Optional[int] = ClassificationFunction.NONE

        A : Any = model_outputs['''logits'''][0]
        A : List[Any] = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            A : int = sigmoid(SCREAMING_SNAKE_CASE )
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            A : Any = softmax(SCREAMING_SNAKE_CASE )
        elif function_to_apply == ClassificationFunction.NONE:
            A : int = outputs
        else:
            raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' )

        if top_k == 1 and _legacy:
            return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}

        A : int = [
            {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE )
        ]
        if not _legacy:
            # the sort key was garbled in the source (`lambda SCREAMING_SNAKE_CASE : x["score"]`);
            # the lambda parameter must be the name the body uses
            dict_scores.sort(key=lambda x : x["score"] , reverse=SCREAMING_SNAKE_CASE )
            if top_k is not None:
                A : Union[str, Any] = dict_scores[:top_k]
        return dict_scores
311
0
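# The pipeline above backs `pipeline("text-classification")`. A small usage
# sketch (assumes the default sentiment model can be downloaded; the scores in
# the comment are illustrative, not guaranteed values):
from transformers import pipeline

clf = pipeline("text-classification")
print(clf("This library is a joy to use."))             # e.g. [{'label': 'POSITIVE', 'score': ...}]
print(clf("This library is a joy to use.", top_k=None))  # all labels, sorted by score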
'''simple docstring'''

import math


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(snake_case__ ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def lowerCAmelCase_ ( snake_case__ = 1_0001 ):
    '''simple docstring'''
    try:
        A : Optional[Any] = int(snake_case__ )
    except (TypeError, ValueError):
        raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
    if nth <= 0:
        raise ValueError('''Parameter nth must be greater than or equal to one.''' )
    A : list[int] = []
    A : Optional[Any] = 2
    while len(snake_case__ ) < nth:
        if is_prime(snake_case__ ):
            primes.append(snake_case__ )
            num += 1
        else:
            num += 1
    return primes[len(snake_case__ ) - 1]


if __name__ == "__main__":
    print(f'''{solution() = }''')
359
'''simple docstring'''

from itertools import zip_longest

import requests
from bsa import BeautifulSoup
from pandas import DataFrame


def lowerCAmelCase_ ( snake_case__ = "laptop" ):
    '''simple docstring'''
    A : Tuple = F'https://www.amazon.in/laptop/s?k={product}'
    A : Optional[int] = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    A : Any = BeautifulSoup(requests.get(snake_case__ , headers=snake_case__ ).text )
    # Initialize a Pandas dataframe with the column titles
    A : List[str] = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) ,
        soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) ,
    ):
        try:
            A : Optional[Any] = item.ha.text
            A : Union[str, Any] = '''https://www.amazon.in/''' + item.ha.a['''href''']
            A : Tuple = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
            try:
                A : int = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                A : Optional[int] = '''Not available'''
            try:
                A : str = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                A : List[Any] = ''''''
            try:
                A : Dict = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                    )
                    * 100 )
            except ValueError:
                A : str = float('''nan''' )
        except AttributeError:
            pass
        A : Union[str, Any] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        A : List[str] = ''' '''
        A : Optional[Any] = ''' '''
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    lowercase : Union[str, Any] = 'headphones'
    get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
311
0
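# The 6k +/- 1 trial-division idea used by the prime solution above can be
# sanity-checked in a few lines, written independently of the obfuscated names
# (a minimal sketch, not the file's own API):
import math

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    # every prime > 3 is of the form 6k +/- 1, so only test those candidates
    for i in range(5, int(math.sqrt(n)) + 1, 6):
        if n % i == 0 or n % (i + 2) == 0:
            return False
    return True

assert [p for p in range(30) if is_prime(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]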
'''simple docstring'''

import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class A ( __snake_case ):
    __magic_name__ = '''facebook/bart-large-mnli'''
    __magic_name__ = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    __magic_name__ = '''text_classifier'''
    __magic_name__ = AutoTokenizer
    __magic_name__ = AutoModelForSequenceClassification

    __magic_name__ = ['''text''', ['''text''']]
    __magic_name__ = ['''text''']

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        super().setup()
        A : Optional[int] = self.model.config
        A : str = -1
        for idx, label in config.idalabel.items():
            if label.lower().startswith('''entail''' ):
                A : Tuple = int(SCREAMING_SNAKE_CASE )
        if self.entailment_id == -1:
            raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        A : List[str] = labels
        return self.pre_processor(
            [text] * len(SCREAMING_SNAKE_CASE ) , [F'This example is {label}' for label in labels] , return_tensors='''pt''' , padding='''max_length''' , )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        A : str = outputs.logits
        A : str = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
360
'''simple docstring'''

import colorsys

from PIL import Image  # type: ignore


def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
    '''simple docstring'''
    A : Optional[int] = x
    A : str = y
    for step in range(snake_case__ ):  # noqa: B007
        A : str = a * a - b * b + x
        A : List[str] = 2 * a * b + y
        A : str = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(snake_case__ , 1 , 1 ) )


def lowerCAmelCase_ ( snake_case__ = 800 , snake_case__ = 600 , snake_case__ = -0.6 , snake_case__ = 0 , snake_case__ = 3.2 , snake_case__ = 50 , snake_case__ = True , ):
    '''simple docstring'''
    A : List[Any] = Image.new('''RGB''' , (image_width, image_height) )
    A : Tuple = img.load()

    # loop through the image-coordinates
    for image_x in range(snake_case__ ):
        for image_y in range(snake_case__ ):
            # determine the figure-coordinates based on the image-coordinates
            A : Optional[int] = figure_width / image_width * image_height
            A : Tuple = figure_center_x + (image_x / image_width - 0.5) * figure_width
            A : List[str] = figure_center_y + (image_y / image_height - 0.5) * figure_height

            A : str = get_distance(snake_case__ , snake_case__ , snake_case__ )

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                A : str = get_color_coded_rgb(snake_case__ )
            else:
                A : List[Any] = get_black_and_white_rgb(snake_case__ )

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    lowercase : Optional[Any] = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
311
0
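# The escape-time loop in the Mandelbrot file above is easy to cross-check with
# Python's complex type; a minimal sketch mirroring the file's |z|^2 > 4
# divergence test:
def escape_steps(c: complex, max_step: int = 50) -> int:
    z = 0j
    for step in range(max_step):
        z = z * z + c
        if (z.real * z.real + z.imag * z.imag) > 4:
            break
    return step

assert escape_steps(0j) == 49    # 0 never diverges: the loop runs to the end
assert escape_steps(1 + 1j) < 5  # clearly outside the set: diverges immediately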
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


lowercase : int = {
    'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
    'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Optional[Any] = [
        'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'AdaptiveEmbedding',
        'TransfoXLForSequenceClassification',
        'TransfoXLLMHeadModel',
        'TransfoXLModel',
        'TransfoXLPreTrainedModel',
        'load_tf_weights_in_transfo_xl',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : str = [
        'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFAdaptiveEmbedding',
        'TFTransfoXLForSequenceClassification',
        'TFTransfoXLLMHeadModel',
        'TFTransfoXLMainLayer',
        'TFTransfoXLModel',
        'TFTransfoXLPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    lowercase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
361
'''simple docstring'''

import argparse
import importlib
from pathlib import Path


# Test all the extensions added in the setup
lowercase : Optional[int] = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    lowercase : str = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    lowercase : Optional[Any] = parser.parse_args()

    if args.check_lib:
        lowercase : List[Any] = importlib.import_module('transformers')
        lowercase : str = Path(transformers_module.__file__).parent
    else:
        lowercase : List[Any] = Path.cwd() / 'build/lib/transformers'

    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
311
0
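# The `_LazyModule` pattern in the `__init__.py` above defers heavy submodule
# imports until an attribute is first accessed. The same idea can be sketched
# with PEP 562 module-level `__getattr__` (a simplified stand-in, not the
# actual transformers implementation):
import importlib

_LAZY = {"TransfoXLConfig": ".configuration_transfo_xl"}  # name -> submodule

def __getattr__(name):
    if name in _LAZY:
        # import the submodule only now, then pull the requested symbol from it
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")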
'''simple docstring'''


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    A : Optional[Any] = len(snake_case__ ) + 1
    A : Tuple = len(snake_case__ ) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    A : int = [[0 for i in range(snake_case__ )] for j in range(snake_case__ )]

    # since string of zero length match pattern of zero length
    A : Dict = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1 , snake_case__ ):
        A : Optional[Any] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1 , snake_case__ ):
        A : str = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1 , snake_case__ ):
        for j in range(1 , snake_case__ ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                A : Optional[Any] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    A : Dict = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    A : int = dp[i - 1][j]
                else:
                    A : Optional[Any] = 0
            else:
                A : Optional[Any] = 0

    return bool(dp[-1][-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    lowercase : str = 'aab'
    lowercase : Any = 'c*a*b'

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f'''{input_string} matches the given pattern {pattern}''')
    else:
        print(f'''{input_string} does not match with the given pattern {pattern}''')
362
'''simple docstring'''

from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDeiTForImageClassification,
        TFDeiTForImageClassificationWithTeacher,
        TFDeiTForMaskedImageModeling,
        TFDeiTModel,
    )
    from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]:
        """simple docstring"""
        A : List[str] = parent
        A : Optional[Any] = batch_size
        A : Tuple = image_size
        A : int = patch_size
        A : Optional[int] = num_channels
        A : str = is_training
        A : List[Any] = use_labels
        A : Any = hidden_size
        A : Any = num_hidden_layers
        A : Optional[int] = num_attention_heads
        A : Any = intermediate_size
        A : List[str] = hidden_act
        A : str = hidden_dropout_prob
        A : Tuple = attention_probs_dropout_prob
        A : Any = type_sequence_label_size
        A : Optional[int] = initializer_range
        A : Dict = scope
        A : Tuple = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        A : List[Any] = (image_size // patch_size) ** 2
        A : Tuple = num_patches + 2

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        A : Tuple = None
        if self.use_labels:
            A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        A : Tuple = self.get_config()

        return config, pixel_values, labels

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE )
        A : str = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        A : Tuple = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE )
        A : List[Any] = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        A : Optional[int] = 1
        A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE )

        A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A : Tuple = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        A : str = self.type_sequence_label_size
        A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )
        A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        A : Optional[Any] = 1
        A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )

        A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A : Optional[int] = self.prepare_config_and_inputs()
        A, A, A : Tuple = config_and_inputs
        A : Any = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_tf
class A ( __snake_case , __snake_case , unittest.TestCase ):
    __magic_name__ = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    __magic_name__ = (
        {
            '''feature-extraction''': TFDeiTModel,
            '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        A : Tuple = TFDeiTModelTester(self )
        A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )

    def __lowerCAmelCase ( self ) -> Any:
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''DeiT does not use inputs_embeds''' )
    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        pass

    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        A, A : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A : Any = model_class(SCREAMING_SNAKE_CASE )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            A : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A, A : str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A : Any = model_class(SCREAMING_SNAKE_CASE )
            A : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A : Union[str, Any] = [*signature.parameters.keys()]

            A : List[Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        A : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        A : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple:
        """simple docstring"""
        A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE )


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_tf
@require_vision
class A ( unittest.TestCase ):
    @cached_property
    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        return (
            DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
            if is_vision_available()
            else None
        )

    @slow
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )

        A : Dict = self.default_image_processor
        A : List[str] = prepare_img()
        A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' )

        # forward pass
        A : Optional[int] = model(**SCREAMING_SNAKE_CASE )

        # verify the logits
        A : List[Any] = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )

        A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
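# The dynamic-programming matcher above supports only '.' and '*', so its
# behaviour can be cross-checked against Python's `re.fullmatch` on the same
# restricted patterns (a sketch of such a property check, written independently
# of the obfuscated names):
import re

cases = [("aab", "c*a*b", True), ("abc", "a.c", True), ("abc", "ab", False), ("", "a*", True)]
for text, pattern, expected in cases:
    assert (re.fullmatch(pattern, text) is not None) == expected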
'''simple docstring'''

import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
    CenterCrop,
    Compose,
    Normalize,
    RandomHorizontalFlip,
    RandomResizedCrop,
    Resize,
    ToTensor,
)

import transformers
from transformers import (
    MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
    AutoConfig,
    AutoImageProcessor,
    AutoModelForImageClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


lowercase : Union[str, Any] = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')

require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')

lowercase : str = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowercase : Any = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    with open(snake_case__ , '''rb''' ) as f:
        A : Any = Image.open(snake_case__ )
        return im.convert('''RGB''' )


@dataclass
class A :
    __magic_name__ = field(
        default=__snake_case ,
        metadata={
            '''help''': '''Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'''
        } , )
    __magic_name__ = field(
        default=__snake_case , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
    __magic_name__ = field(default=__snake_case , metadata={'''help''': '''A folder containing the training data.'''} )
    __magic_name__ = field(default=__snake_case , metadata={'''help''': '''A folder containing the validation data.'''} )
    __magic_name__ = field(
        default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} )
    __magic_name__ = field(
        default=__snake_case ,
        metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    __magic_name__ = field(
        default=__snake_case ,
        metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )

    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                '''You must specify either a dataset name from the hub or a train and/or validation directory.''' )


@dataclass
class A :
    __magic_name__ = field(
        default='''google/vit-base-patch16-224-in21k''' ,
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
    __magic_name__ = field(
        default=__snake_case ,
        metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(__snake_case )} , )
    __magic_name__ = field(
        default=__snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    __magic_name__ = field(
        default=__snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from s3'''} )
    __magic_name__ = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    __magic_name__ = field(default=__snake_case , metadata={'''help''': '''Name or path of preprocessor config.'''} )
    __magic_name__ = field(
        default=__snake_case ,
        metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
    __magic_name__ = field(
        default=__snake_case ,
        metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : int = torch.stack([example['''pixel_values'''] for example in examples] )
    A : str = torch.tensor([example['''labels'''] for example in examples] )
    return {"pixel_values": pixel_values, "labels": labels}


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        A : str = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        A : Any = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_image_classification''' , snake_case__ , snake_case__ )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,
        datefmt='''%m/%d/%Y %H:%M:%S''' ,
        handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    A : Tuple = training_args.get_process_log_level()
    logger.setLevel(snake_case__ )
    transformers.utils.logging.set_verbosity(snake_case__ )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
    logger.info(F'Training/evaluation parameters {training_args}' )

    # Detecting last checkpoint.
    A : Optional[Any] = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        A : int = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F'Output directory ({training_args.output_dir}) already exists and is not empty. '
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )

    # Set seed before initializing model.
    set_seed(training_args.seed )

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        A : str = load_dataset(
            data_args.dataset_name ,
            data_args.dataset_config_name ,
            cache_dir=model_args.cache_dir ,
            task='''image-classification''' ,
            use_auth_token=True if model_args.use_auth_token else None , )
    else:
        A : int = {}
        if data_args.train_dir is not None:
            A : int = os.path.join(data_args.train_dir , '''**''' )
        if data_args.validation_dir is not None:
            A : Optional[int] = os.path.join(data_args.validation_dir , '''**''' )
        A : int = load_dataset(
            '''imagefolder''' ,
            data_files=snake_case__ ,
            cache_dir=model_args.cache_dir ,
            task='''image-classification''' , )

    # If we don't have a validation split, split off a percentage of train as validation.
    A : str = None if '''validation''' in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , snake_case__ ) and data_args.train_val_split > 0.0:
        A : Dict = dataset['''train'''].train_test_split(data_args.train_val_split )
        A : List[str] = split['''train''']
        A : str = split['''test''']

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    A : int = dataset['''train'''].features['''labels'''].names
    A : List[Any] = {}, {}
    for i, label in enumerate(snake_case__ ):
        A : List[str] = str(snake_case__ )
        A : Tuple = label

    # Load the accuracy metric from the datasets package
    A : Dict = evaluate.load('''accuracy''' )

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(snake_case__ ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )

    A : int = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path ,
        num_labels=len(snake_case__ ) ,
        labelaid=snake_case__ ,
        idalabel=snake_case__ ,
        finetuning_task='''image-classification''' ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None , )
    A : Dict = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path ,
        from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,
        config=snake_case__ ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None ,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    A : Any = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path ,
        cache_dir=model_args.cache_dir ,
        revision=model_args.model_revision ,
        use_auth_token=True if model_args.use_auth_token else None , )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        A : Optional[Any] = image_processor.size['''shortest_edge''']
    else:
        A : Optional[Any] = (image_processor.size['''height'''], image_processor.size['''width'''])
    A : Optional[Any] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
    A : List[str] = Compose(
        [
            RandomResizedCrop(snake_case__ ),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ] )
    A : Tuple = Compose(
        [
            Resize(snake_case__ ),
            CenterCrop(snake_case__ ),
            ToTensor(),
            normalize,
        ] )

    def train_transforms(snake_case__ ):
        A : List[Any] = [
            _train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
        ]
        return example_batch

    def val_transforms(snake_case__ ):
        A : Optional[int] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            A : Optional[Any] = (
                dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(snake_case__ )

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            A : Tuple = (
                dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(snake_case__ )

    # Initalize our trainer
    A : Dict = Trainer(
        model=snake_case__ ,
        args=snake_case__ ,
        train_dataset=dataset['''train'''] if training_args.do_train else None ,
        eval_dataset=dataset['''validation'''] if training_args.do_eval else None ,
        compute_metrics=snake_case__ ,
        tokenizer=snake_case__ ,
        data_collator=snake_case__ , )

    # Training
    if training_args.do_train:
        A : Optional[int] = None
        if training_args.resume_from_checkpoint is not None:
            A : List[str] = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            A : List[str] = last_checkpoint
        A : Tuple = trainer.train(resume_from_checkpoint=snake_case__ )
        trainer.save_model()
        trainer.log_metrics('''train''' , train_result.metrics )
        trainer.save_metrics('''train''' , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        A : str = trainer.evaluate()
        trainer.log_metrics('''eval''' , snake_case__ )
        trainer.save_metrics('''eval''' , snake_case__ )

    # Write model card and (optionally) push to hub
    A : Any = {
        '''finetuned_from''': model_args.model_name_or_path,
        '''tasks''': '''image-classification''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''image-classification''', '''vision'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**snake_case__ )
    else:
        trainer.create_model_card(**snake_case__ )


if __name__ == "__main__":
    main()
363
'''simple docstring'''

# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


lowercase : List[str] = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase : Optional[Any] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    lowercase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
311
0
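# The `collate_fn` in the training script above just stacks per-example tensors
# into a batch. A minimal check of that behaviour with dummy data (shapes are
# illustrative only):
import torch

examples = [
    {"pixel_values": torch.rand(3, 224, 224), "labels": 1},
    {"pixel_values": torch.rand(3, 224, 224), "labels": 0},
]
batch = {
    "pixel_values": torch.stack([e["pixel_values"] for e in examples]),
    "labels": torch.tensor([e["labels"] for e in examples]),
}
assert batch["pixel_values"].shape == (2, 3, 224, 224)
assert batch["labels"].tolist() == [1, 0]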
'''simple docstring'''

from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class A ( __snake_case ):
    __magic_name__ = '''dandelin/vilt-b32-finetuned-vqa'''
    __magic_name__ = (
        '''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
        '''image containing the information, as well as a `question` which should be the question in English. It '''
        '''returns a text that is the answer to the question.'''
    )
    __magic_name__ = '''image_qa'''
    __magic_name__ = AutoProcessor
    __magic_name__ = AutoModelForVisualQuestionAnswering

    __magic_name__ = ['''image''', '''text''']
    __magic_name__ = ['''text''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ['''vision'''] )
        super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        return self.pre_processor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_tensors='''pt''' )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        with torch.no_grad():
            return self.model(**SCREAMING_SNAKE_CASE ).logits

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : Optional[int] = outputs.argmax(-1 ).item()
        return self.model.config.idalabel[idx]
364
'''simple docstring'''

from __future__ import annotations

lowercase : Union[str, Any] = list[tuple[int, int]]

lowercase : Optional[Any] = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

lowercase : Any = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[Any]:
        """simple docstring"""
        A : int = pos_x
        A : Optional[Any] = pos_y
        A : Optional[Any] = (pos_y, pos_x)
        A : str = goal_x
        A : Optional[int] = goal_y
        A : List[Any] = g_cost
        A : str = parent
        A : str = self.calculate_heuristic()

    def __lowerCAmelCase ( self ) -> float:
        """simple docstring"""
        A : Optional[int] = abs(self.pos_x - self.goal_x )
        A : Optional[Any] = abs(self.pos_y - self.goal_y )
        return dx + dy

    def __lt__( self , SCREAMING_SNAKE_CASE ) -> bool:
        """simple docstring"""
        return self.f_cost < other.f_cost


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , SCREAMING_SNAKE_CASE )
        A : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99999 , SCREAMING_SNAKE_CASE )

        A : Optional[Any] = [self.start]
        A : list[Node] = []

        A : Tuple = False

    def __lowerCAmelCase ( self ) -> Path | None:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            A : Optional[int] = self.open_nodes.pop(0 )

            if current_node.pos == self.target.pos:
                A : Optional[int] = True
                return self.retrace_path(SCREAMING_SNAKE_CASE )

            self.closed_nodes.append(SCREAMING_SNAKE_CASE )
            A : Any = self.get_successors(SCREAMING_SNAKE_CASE )

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(SCREAMING_SNAKE_CASE )
                else:
                    # retrieve the best current path
                    A : str = self.open_nodes.pop(self.open_nodes.index(SCREAMING_SNAKE_CASE ) )

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(SCREAMING_SNAKE_CASE )
                    else:
                        self.open_nodes.append(SCREAMING_SNAKE_CASE )

        if not self.reached:
            return [self.start.pos]
        return None

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> list[Node]:
        """simple docstring"""
        A : List[Any] = []
        for action in delta:
            A : List[str] = parent.pos_x + action[1]
            A : Dict = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(SCREAMING_SNAKE_CASE ) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    SCREAMING_SNAKE_CASE ,
                    SCREAMING_SNAKE_CASE ,
                    self.target.pos_y ,
                    self.target.pos_x ,
                    parent.g_cost + 1 ,
                    SCREAMING_SNAKE_CASE ,
                ) )
        return successors

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Path:
        """simple docstring"""
        A : int = node
        A : Union[str, Any] = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            A : int = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    lowercase : Tuple = (0, 0)
    lowercase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print('------')

    lowercase : int = GreedyBestFirst(init, goal)
    lowercase : Union[str, Any] = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            lowercase : Dict = 2

        for elem in grid:
            print(elem)
311
0
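# The grid search above orders its frontier purely by the Manhattan heuristic.
# The same greedy best-first behaviour can be sketched in a few lines with
# `heapq` (an independent sketch on a tiny obstacle-free grid, not the file's
# own classes):
import heapq

def greedy_bfs(start, goal, size=3):
    h0 = abs(goal[0] - start[0]) + abs(goal[1] - start[1])
    frontier, seen = [(h0, start, [start])], {start}
    while frontier:
        _, pos, path = heapq.heappop(frontier)  # always expand the lowest-heuristic node
        if pos == goal:
            return path
        for dy, dx in ((-1, 0), (0, -1), (1, 0), (0, 1)):  # up, left, down, right
            ny, nx = pos[0] + dy, pos[1] + dx
            if 0 <= ny < size and 0 <= nx < size and (ny, nx) not in seen:
                seen.add((ny, nx))
                h = abs(goal[0] - ny) + abs(goal[1] - nx)  # Manhattan heuristic, as in the file
                heapq.heappush(frontier, (h, (ny, nx), path + [(ny, nx)]))
    return None

assert greedy_bfs((0, 0), (2, 2))[-1] == (2, 2)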
'''simple docstring'''

from __future__ import annotations

import inspect
import unittest

import numpy as np

from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDeiTForImageClassification,
        TFDeiTForImageClassificationWithTeacher,
        TFDeiTForMaskedImageModeling,
        TFDeiTModel,
    )
    from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DeiTImageProcessor


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]:
        """simple docstring"""
        A : List[str] = parent
        A : Optional[Any] = batch_size
        A : Tuple = image_size
        A : int = patch_size
        A : Optional[int] = num_channels
        A : str = is_training
        A : List[Any] = use_labels
        A : Any = hidden_size
        A : Any = num_hidden_layers
        A : Optional[int] = num_attention_heads
        A : Any = intermediate_size
        A : List[str] = hidden_act
        A : str = hidden_dropout_prob
        A : Tuple = attention_probs_dropout_prob
        A : Any = type_sequence_label_size
        A : Optional[int] = initializer_range
        A : Dict = scope
        A : Tuple = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        A : List[Any] = (image_size // patch_size) ** 2
        A : Tuple = num_patches + 2

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        A : Tuple = None
        if self.use_labels:
            A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        A : Tuple = self.get_config()

        return config, pixel_values, labels

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        return DeiTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE )
        A : str = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        A : Tuple = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE )
        A : List[Any] = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        A : Optional[int] = 1
        A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE )

        A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A : Tuple = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        A : str = self.type_sequence_label_size
        A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )
        A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        A : Optional[Any] = 1
        A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE )

        A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A : Optional[int] = self.prepare_config_and_inputs()
        A : Tuple = config_and_inputs
        A : Any = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_tf
class A ( __snake_case , __snake_case , unittest.TestCase ):
    __magic_name__ = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    __magic_name__ = (
        {
            '''feature-extraction''': TFDeiTModel,
            '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        A : Tuple = TFDeiTModelTester(self )
        A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )

    def __lowerCAmelCase ( self ) -> Any:
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''DeiT does not use inputs_embeds''' )
    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        pass

    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        A : int = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A : Any = model_class(SCREAMING_SNAKE_CASE )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            A : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) )

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A : str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A : Any = model_class(SCREAMING_SNAKE_CASE )
            A : str = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A : Union[str, Any] = [*signature.parameters.keys()]

            A : List[Any] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        A : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        A : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple:
        """simple docstring"""
        A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE )


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_tf
@require_vision
class A ( unittest.TestCase ):
    @cached_property
    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        return (
            DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
            if is_vision_available()
            else None
        )

    @slow
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )

        A : Dict = self.default_image_processor
        A : List[str] = prepare_img()
        A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' )

        # forward pass
        A : Optional[int] = model(**SCREAMING_SNAKE_CASE )

        # verify the logits
        A : List[Any] = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )

        A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
365
'''simple docstring'''
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase : Any = 'src/transformers'
lowercase : str = 'docs/source/en/tasks'


def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
    '''simple docstring'''
    with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        A : Union[str, Any] = f.readlines()
    # Find the start prompt.
    A : List[Any] = 0
    while not lines[start_index].startswith(snake_case__ ):
        start_index += 1
    start_index += 1

    A : List[str] = start_index
    while not lines[end_index].startswith(snake_case__ ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
lowercase : int = direct_transformers_import(TRANSFORMERS_PATH)

lowercase : str = {
    'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowercase : Optional[int] = {
    'summarization.md': ('nllb',),
    'translation.md': ('nllb',),
}


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : int = TASK_GUIDE_TO_MODELS[task_guide]
    A : List[str] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
    A : Union[str, Any] = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"


def lowerCAmelCase_ ( snake_case__ , snake_case__=False ):
    '''simple docstring'''
    A, A, A, A : Optional[int] = _find_text_in_file(
        filename=os.path.join(snake_case__ , snake_case__ ) ,
        start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' ,
        end_prompt='''<!--End of the generated tip-->''' ,
    )
    A : Optional[int] = get_model_list_for_task(snake_case__ )

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ''' to fix this.''' )


if __name__ == "__main__":
    lowercase : Dict = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    lowercase : List[Any] = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
311
0
'''simple docstring'''
import inspect
import unittest

from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
    from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class A ( __snake_case ):
    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : List[str] = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''neck_hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , '''num_attention_heads''' ) )


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=640 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE="silu" , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=None , ) -> Dict:
        """simple docstring"""
        A : Any = parent
        A : Any = batch_size
        A : Any = image_size
        A : List[Any] = patch_size
        A : List[Any] = num_channels
        A : List[str] = last_hidden_size
        A : int = num_attention_heads
        A : List[str] = hidden_act
        A : List[str] = conv_kernel_size
        A : Optional[int] = output_stride
        A : List[str] = hidden_dropout_prob
        A : int = attention_probs_dropout_prob
        A : Union[str, Any] = classifier_dropout_prob
        A : int = use_labels
        A : int = is_training
        A : str = num_labels
        A : List[str] = initializer_range
        A : int = scope

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        A : Union[str, Any] = None
        A : Union[str, Any] = None
        if self.use_labels:
            A : Dict = ids_tensor([self.batch_size] , self.num_labels )
            A : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )

        A : Optional[Any] = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        return MobileViTConfig(
            image_size=self.image_size ,
            patch_size=self.patch_size ,
            num_channels=self.num_channels ,
            num_attention_heads=self.num_attention_heads ,
            hidden_act=self.hidden_act ,
            conv_kernel_size=self.conv_kernel_size ,
            output_stride=self.output_stride ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            classifier_dropout_prob=self.classifier_dropout_prob ,
            initializer_range=self.initializer_range ,
        )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : Dict = MobileViTModel(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : Tuple = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,
        )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        A : Optional[int] = self.num_labels
        A : Optional[Any] = MobileViTForImageClassification(SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : Dict = self.num_labels
        A : Union[str, Any] = MobileViTForSemanticSegmentation(SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : str = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.logits.shape ,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,
        )
        A : int = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.logits.shape ,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,
        )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : Optional[int] = self.prepare_config_and_inputs()
        A : List[Any] = config_and_inputs
        A : Optional[Any] = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class A ( __snake_case , __snake_case , unittest.TestCase ):
    __magic_name__ = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    __magic_name__ = (
        {
            '''feature-extraction''': MobileViTModel,
            '''image-classification''': MobileViTForImageClassification,
            '''image-segmentation''': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False

    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        A : str = MobileViTModelTester(self )
        A : Optional[int] = MobileViTConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''MobileViT does not use inputs_embeds''' )
    def __lowerCAmelCase ( self ) -> Any:
        """simple docstring"""
        pass

    @unittest.skip(reason='''MobileViT does not support input and output embeddings''' )
    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        pass

    @unittest.skip(reason='''MobileViT does not output attentions''' )
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        pass

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        A : Any = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
            A : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A : Union[str, Any] = [*signature.parameters.keys()]

            A : str = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        pass

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""

        def check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
            A : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
            model.to(SCREAMING_SNAKE_CASE )
            model.eval()

            with torch.no_grad():
                A : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )

            A : str = outputs.hidden_states

            A : Tuple = 5
            self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            A : List[str] = 2
            for i in range(len(SCREAMING_SNAKE_CASE ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) ,
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        A : str = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            A : Dict = True
            check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A : Any = True

            check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        A : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE )

    @slow
    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A : Optional[int] = MobileViTModel.from_pretrained(SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE )


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class A ( unittest.TestCase ):
    @cached_property
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        return MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None

    @slow
    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : List[Any] = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(SCREAMING_SNAKE_CASE )

        A : int = self.default_image_processor
        A : Dict = prepare_img()
        A : List[str] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE )

        # forward pass
        with torch.no_grad():
            A : List[Any] = model(**SCREAMING_SNAKE_CASE )

        # verify the logits
        A : str = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )

        A : List[Any] = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(SCREAMING_SNAKE_CASE )

        self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )

    @slow
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        A : int = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
        A : Optional[int] = model.to(SCREAMING_SNAKE_CASE )

        A : Tuple = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )

        A : Union[str, Any] = prepare_img()
        A : str = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE )

        # forward pass
        with torch.no_grad():
            A : List[Any] = model(**SCREAMING_SNAKE_CASE )
        A : List[Any] = outputs.logits

        # verify the logits
        A : Optional[Any] = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , SCREAMING_SNAKE_CASE )

        A : List[Any] = torch.tensor(
            [
                [[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
                [[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
                [[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
            ] ,
            device=SCREAMING_SNAKE_CASE ,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )

    @slow
    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : int = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )
        A : Dict = model.to(SCREAMING_SNAKE_CASE )

        A : Any = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' )

        A : List[str] = prepare_img()
        A : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE )

        # forward pass
        with torch.no_grad():
            A : Dict = model(**SCREAMING_SNAKE_CASE )

        A : Union[str, Any] = outputs.logits.detach().cpu()

        A : str = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE , target_sizes=[(50, 60)] )
        A : List[str] = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE )

        A : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE )
        A : Optional[int] = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , SCREAMING_SNAKE_CASE )
366
'''simple docstring'''
def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if len(snake_case__ ) <= 1:
        return [tuple(snake_case__ )]

    A : Tuple = []

    def generate(snake_case__ , snake_case__ ):
        if k == 1:
            res.append(tuple(arr[:] ) )
            return

        generate(k - 1 , snake_case__ )

        for i in range(k - 1 ):
            if k % 2 == 0:  # k is even
                A, A : Optional[Any] = arr[k - 1], arr[i]
            else:  # k is odd
                A, A : Optional[Any] = arr[k - 1], arr[0]
            generate(k - 1 , snake_case__ )

    generate(len(snake_case__ ) , snake_case__ )
    return res


if __name__ == "__main__":
    lowercase : List[str] = input('Enter numbers separated by a comma:\n').strip()
    lowercase : int = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
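# Note on the algorithm above: Heap's algorithm enumerates all n! orderings of the
# input by fixing one position per recursion level and performing a single swap
# before each recursive call, with the swap index chosen by the parity of k; e.g. a
# two-element input yields [(1, 2), (2, 1)] (illustrative trace, not part of the file).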
311
0
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional

from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser


@dataclass
class A :
    __magic_name__ = field(
        metadata={'''help''': '''The output directory where the model will be written.'''} , )
    __magic_name__ = field(
        metadata={
            '''help''': (
                '''The encoder model checkpoint for weights initialization.'''
                '''Don\'t set if you want to train an encoder model from scratch.'''
            )
        } , )
    __magic_name__ = field(
        metadata={
            '''help''': (
                '''The decoder model checkpoint for weights initialization.'''
                '''Don\'t set if you want to train a decoder model from scratch.'''
            )
        } , )
    __magic_name__ = field(
        default=__snake_case , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} )
    __magic_name__ = field(
        default=__snake_case , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} )


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : Dict = HfArgumentParser((ModelArguments,) )
    (A ) : str = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        A : str = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        A : Tuple = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        A : Any = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        A : int = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    A : Any = True
    A : Dict = True

    A : List[str] = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path ,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path ,
        encoder_config=snake_case__ ,
        decoder_config=snake_case__ ,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    A : int = decoder_config.decoder_start_token_id
    A : int = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        A : Tuple = decoder_config.bos_token_id
    if pad_token_id is None:
        A : Union[str, Any] = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    A : List[str] = decoder_config.eos_token_id
    A : List[Any] = decoder_start_token_id
    A : Any = pad_token_id

    A : List[str] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
    A : Dict = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    A : List[Any] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )

    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )


if __name__ == "__main__":
    main()
367
'''simple docstring'''
import tempfile

import torch

from diffusers import (
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    DPMSolverSinglestepScheduler,
    UniPCMultistepScheduler,
)

from .test_schedulers import SchedulerCommonTest


class A ( __snake_case ):
    __magic_name__ = (UniPCMultistepScheduler,)
    __magic_name__ = (('''num_inference_steps''', 25),)

    def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        A : str = {
            '''num_train_timesteps''': 1000,
            '''beta_start''': 0.0_001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''solver_order''': 2,
            '''solver_type''': '''bh2''',
        }

        config.update(**SCREAMING_SNAKE_CASE )
        return config

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        A : List[Any] = dict(self.forward_default_kwargs )
        A : Union[str, Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
        A : Optional[Any] = self.dummy_sample
        A : int = 0.1 * sample
        A : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            A : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            A : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
            # copy over dummy past residuals
            A : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(SCREAMING_SNAKE_CASE )
                A : List[Any] = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
                new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                A : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]

            A, A : Tuple = sample, sample
            for t in range(SCREAMING_SNAKE_CASE , time_step + scheduler.config.solver_order + 1 ):
                A : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
                A : Optional[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=0 , **SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        A : Optional[Any] = dict(self.forward_default_kwargs )
        A : Tuple = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )
        A : List[Any] = self.dummy_sample
        A : int = 0.1 * sample
        A : Optional[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            A : Optional[int] = self.get_scheduler_config()
            A : Any = scheduler_class(**SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

            # copy over dummy past residuals (must be after setting timesteps)
            A : int = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(SCREAMING_SNAKE_CASE )
                A : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

                # copy over dummy past residual (must be after setting timesteps)
                A : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]

            A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
            A : List[Any] = new_scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample

            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        if scheduler is None:
            A : Dict = self.scheduler_classes[0]
            A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )

        A : Tuple = self.scheduler_classes[0]
        A : Union[str, Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
        A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )

        A : int = 10
        A : Tuple = self.dummy_model()
        A : Any = self.dummy_sample_deter
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for i, t in enumerate(scheduler.timesteps ):
            A : int = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample

        return sample

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        A : Tuple = dict(self.forward_default_kwargs )
        A : List[Any] = kwargs.pop('''num_inference_steps''' , SCREAMING_SNAKE_CASE )

        for scheduler_class in self.scheduler_classes:
            A : Dict = self.get_scheduler_config()
            A : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
            A : Optional[Any] = self.dummy_sample
            A : Optional[int] = 0.1 * sample

            if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
            elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE , '''set_timesteps''' ):
                A : Tuple = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            A : Dict = [residual + 0.2, residual + 0.15, residual + 0.10]
            A : List[str] = dummy_past_residuals[: scheduler.config.solver_order]

            A : List[Any] = scheduler.timesteps[5]
            A : Dict = scheduler.timesteps[6]

            A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample
            A : List[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).prev_sample

            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : Union[str, Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )

        A : List[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
        A : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_464 ) < 1e-3

        A : Dict = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        A : Optional[int] = DEISMultistepScheduler.from_config(scheduler.config )
        A : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
        A : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )

        A : Optional[Any] = self.full_loop(scheduler=SCREAMING_SNAKE_CASE )
        A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_464 ) < 1e-3

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=SCREAMING_SNAKE_CASE ,
                            prediction_type=SCREAMING_SNAKE_CASE ,
                            sample_max_value=SCREAMING_SNAKE_CASE ,
                            solver_order=SCREAMING_SNAKE_CASE ,
                            solver_type=SCREAMING_SNAKE_CASE ,
                        )

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=SCREAMING_SNAKE_CASE ,
                        solver_type=SCREAMING_SNAKE_CASE ,
                        prediction_type=SCREAMING_SNAKE_CASE ,
                    )
                    A : Dict = self.full_loop(
                        solver_order=SCREAMING_SNAKE_CASE ,
                        solver_type=SCREAMING_SNAKE_CASE ,
                        prediction_type=SCREAMING_SNAKE_CASE ,
                    )
                    assert not torch.isnan(SCREAMING_SNAKE_CASE ).any(), "Samples have nan numbers"

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )
        self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE , time_step=0 )

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : int = self.full_loop()
        A : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.2_464 ) < 1e-3

    def __lowerCAmelCase ( self ) -> List[str]:
        """simple docstring"""
        A : List[Any] = self.full_loop(prediction_type='''v_prediction''' )
        A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )

        assert abs(result_mean.item() - 0.1_014 ) < 1e-3

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : Dict = self.scheduler_classes[0]
        A : List[Any] = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE , dynamic_thresholding_ratio=0 )
        A : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE )

        A : Tuple = 10
        A : Union[str, Any] = self.dummy_model()
        A : Dict = self.dummy_sample_deter.half()
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for i, t in enumerate(scheduler.timesteps ):
            A : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
            A : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample

        assert sample.dtype == torch.floataa

    def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str:
        """simple docstring"""
        for scheduler_class in self.scheduler_classes:
            A : Dict = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
            A : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
            scheduler.set_timesteps(scheduler.config.num_train_timesteps )
            assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
311
0
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class A :
    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = None
    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = False
    __magic_name__ = True
    __magic_name__ = None
    __magic_name__ = 1
    __magic_name__ = None
    __magic_name__ = False
    __magic_name__ = None
    __magic_name__ = None

    def __lowerCAmelCase ( self ) -> "DownloadConfig":
        """simple docstring"""
        return self.__class__(**{k: copy.deepcopy(SCREAMING_SNAKE_CASE ) for k, v in self.__dict__.items()} )
368
'''simple docstring'''
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class A ( __snake_case ):
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        A : Dict = DDIMScheduler.from_config(scheduler.config )

        self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )

    @torch.no_grad()
    def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        if isinstance(self.unet.config.sample_size , SCREAMING_SNAKE_CASE ):
            A : List[Any] = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            A : Optional[int] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) != batch_size:
            raise ValueError(
                F'You have passed a list of generators of length {len(SCREAMING_SNAKE_CASE )}, but requested an effective batch'
                F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )

        A : str = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=self.device , dtype=self.unet.dtype )

        # set step values
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            A : Any = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            A : int = self.scheduler.step(
                SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , use_clipped_model_output=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample

        A : Dict = (image / 2 + 0.5).clamp(0 , 1 )
        A : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
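# Sampler note: with eta=0.0 the DDIM update above is fully deterministic given the
# initial noise, while eta=1.0 recovers DDPM-like stochastic sampling. An illustrative
# invocation sketch (argument names assumed from the diffusers DDIMPipeline API):
#     pipe = DDIMPipeline(unet=unet, scheduler=scheduler)
#     images = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images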
311
0
'''simple docstring'''
import unittest

from parameterized import parameterized

from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel


class A :
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> str:
        """simple docstring"""
        A : Dict = parent
        A : int = batch_size
        A : List[Any] = seq_length
        A : Optional[Any] = is_training
        A : Union[str, Any] = use_input_mask
        A : Any = use_token_type_ids
        A : List[Any] = use_labels
        A : List[str] = vocab_size
        A : int = hidden_size
        A : Optional[int] = num_hidden_layers
        A : List[str] = num_attention_heads
        A : Tuple = intermediate_size
        A : int = hidden_act
        A : str = hidden_dropout_prob
        A : Any = attention_probs_dropout_prob
        A : Tuple = max_position_embeddings
        A : List[str] = type_vocab_size
        A : Any = type_sequence_label_size
        A : int = initializer_range
        A : int = num_labels
        A : List[Any] = num_choices
        A : int = scope

    def __lowerCAmelCase ( self ) -> List[Any]:
        """simple docstring"""
        A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        A : Dict = None
        if self.use_input_mask:
            A : Any = random_attention_mask([self.batch_size, self.seq_length] )

        A : Any = None
        if self.use_token_type_ids:
            A : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        A : Optional[int] = None
        A : str = None
        A : Optional[Any] = None
        if self.use_labels:
            A : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            A : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            A : List[Any] = ids_tensor([self.batch_size] , self.num_choices )

        A : List[Any] = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __lowerCAmelCase ( self ) -> Dict:
        """simple docstring"""
        return OpenLlamaConfig(
            vocab_size=self.vocab_size ,
            hidden_size=self.hidden_size ,
            num_hidden_layers=self.num_hidden_layers ,
            num_attention_heads=self.num_attention_heads ,
            intermediate_size=self.intermediate_size ,
            hidden_act=self.hidden_act ,
            hidden_dropout_prob=self.hidden_dropout_prob ,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob ,
            max_position_embeddings=self.max_position_embeddings ,
            type_vocab_size=self.type_vocab_size ,
            is_decoder=SCREAMING_SNAKE_CASE ,
            initializer_range=self.initializer_range ,
            use_stable_embedding=SCREAMING_SNAKE_CASE ,
        )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        A : Union[str, Any] = OpenLlamaModel(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : Tuple = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
        A : Any = model(SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> str:
        """simple docstring"""
        A : Optional[Any] = True
        A : Optional[Any] = OpenLlamaModel(SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : Optional[Any] = model(
            SCREAMING_SNAKE_CASE ,
            attention_mask=SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=SCREAMING_SNAKE_CASE ,
            encoder_attention_mask=SCREAMING_SNAKE_CASE ,
        )
        A : Union[str, Any] = model(
            SCREAMING_SNAKE_CASE ,
            attention_mask=SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=SCREAMING_SNAKE_CASE ,
        )
        A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> List[str]:
        """simple docstring"""
        A : Any = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) -> Optional[int]:
        """simple docstring"""
        A : str = True
        A : Dict = True
        A : Dict = OpenLlamaForCausalLM(config=SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()

        # first forward pass
        A : Union[str, Any] = model(
            SCREAMING_SNAKE_CASE ,
            attention_mask=SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=SCREAMING_SNAKE_CASE ,
            encoder_attention_mask=SCREAMING_SNAKE_CASE ,
            use_cache=SCREAMING_SNAKE_CASE ,
        )
        A : List[str] = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        A : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
        A : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )

        # append to next input_ids and
        A : str = torch.cat([input_ids, next_tokens] , dim=-1 )
        A : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )

        A : List[Any] = model(
            SCREAMING_SNAKE_CASE ,
            attention_mask=SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=SCREAMING_SNAKE_CASE ,
            encoder_attention_mask=SCREAMING_SNAKE_CASE ,
            output_hidden_states=SCREAMING_SNAKE_CASE ,
        )['''hidden_states'''][0]
        A : int = model(
            SCREAMING_SNAKE_CASE ,
            attention_mask=SCREAMING_SNAKE_CASE ,
            encoder_hidden_states=SCREAMING_SNAKE_CASE ,
            encoder_attention_mask=SCREAMING_SNAKE_CASE ,
            past_key_values=SCREAMING_SNAKE_CASE ,
            output_hidden_states=SCREAMING_SNAKE_CASE ,
        )['''hidden_states'''][0]

        # select random slice
        A : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        A : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
        A : int = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) )

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A : str = self.prepare_config_and_inputs()
        ( A ) : Union[str, Any] = config_and_inputs
        A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class A ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
    __magic_name__ = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    __magic_name__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    __magic_name__ = (
        {
            '''feature-extraction''': OpenLlamaModel,
            '''text-classification''': OpenLlamaForSequenceClassification,
            '''text-generation''': OpenLlamaForCausalLM,
            '''zero-shot''': OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __magic_name__ = False
    __magic_name__ = False

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : List[Any] = OpenLlamaModelTester(self )
        A : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def __lowerCAmelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        A : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Any:
        """simple docstring"""
        A : Tuple = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            A : int = type
            self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> Tuple:
        """simple docstring"""
        A : int = self.model_tester.prepare_config_and_inputs_for_common()
        A : Dict = 3
        A : Optional[Any] = input_dict['''input_ids''']
        A : Optional[Any] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE )
        A : str = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        A : Union[str, Any] = OpenLlamaForSequenceClassification(SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : List[str] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        A : str = self.model_tester.prepare_config_and_inputs_for_common()
        A : List[str] = 3
        A : int = '''single_label_classification'''
        A : Dict = input_dict['''input_ids''']
        A : List[Any] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE )
        A : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        A : Tuple = OpenLlamaForSequenceClassification(SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : str = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def __lowerCAmelCase ( self ) -> int:
        """simple docstring"""
        A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        A : Any = 3
        A : Union[str, Any] = '''multi_label_classification'''
        A : Any = input_dict['''input_ids''']
        A : Optional[int] = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE )
        A : Union[str, Any] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        A : Union[str, Any] = OpenLlamaForSequenceClassification(SCREAMING_SNAKE_CASE )
        model.to(SCREAMING_SNAKE_CASE )
        model.eval()
        A : Optional[int] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip('''Open-Llama buffers include complex numbers, which breaks this test''' )
    def __lowerCAmelCase ( self ) -> str:
        """simple docstring"""
        pass

    @parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        A : str = self.model_tester.prepare_config_and_inputs_for_common()
        A : Optional[Any] = ids_tensor([1, 10] , config.vocab_size )
        A : Optional[int] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        A : Optional[Any] = OpenLlamaModel(SCREAMING_SNAKE_CASE )
        original_model.to(SCREAMING_SNAKE_CASE )
        original_model.eval()
        A : Dict = original_model(SCREAMING_SNAKE_CASE ).last_hidden_state
        A : str = original_model(SCREAMING_SNAKE_CASE ).last_hidden_state

        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        A : Dict = {'''type''': scaling_type, '''factor''': 10.0}
        A : List[str] = OpenLlamaModel(SCREAMING_SNAKE_CASE )
        scaled_model.to(SCREAMING_SNAKE_CASE )
        scaled_model.eval()
        A : Optional[Any] = scaled_model(SCREAMING_SNAKE_CASE ).last_hidden_state
        A : List[str] = scaled_model(SCREAMING_SNAKE_CASE ).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) )

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-5 ) )
369
'''simple docstring'''
from __future__ import annotations

from random import random


class A :
    def __init__( self , SCREAMING_SNAKE_CASE = None ) -> Tuple:
        """simple docstring"""
        A : Optional[Any] = value
        A : Any = random()
        A : Node | None = None
        A : Node | None = None

    def __repr__( self ) -> str:
        """simple docstring"""
        from pprint import pformat

        if self.left is None and self.right is None:
            return F'\'{self.value}: {self.prior:.5}\''
        else:
            return pformat(
                {F'{self.value}: {self.prior:.5}': (self.left, self.right)} , indent=1 )

    def __str__( self ) -> str:
        """simple docstring"""
        A : Optional[Any] = str(self.value ) + ''' '''
        A : Union[str, Any] = str(self.left or '''''' )
        A : Any = str(self.right or '''''' )
        return value + left + right


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            A, A : Any = split(root.left , snake_case__ )
            return left, root
        else:
            A, A : Optional[int] = split(root.right , snake_case__ )
            return root, right


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        A : List[str] = merge(left.right , snake_case__ )
        return left
    else:
        A : Tuple = merge(snake_case__ , right.left )
        return right


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    A : List[Any] = Node(snake_case__ )
    A, A : Tuple = split(snake_case__ , snake_case__ )
    return merge(merge(snake_case__ , snake_case__ ) , snake_case__ )


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    A, A : Dict = split(snake_case__ , value - 1 )
    A, A : Any = split(snake_case__ , snake_case__ )
    return merge(snake_case__ , snake_case__ )


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=''',''' )
        inorder(root.right )


def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    for arg in args.split():
        if arg[0] == "+":
            A : int = insert(snake_case__ , int(arg[1:] ) )
        elif arg[0] == "-":
            A : int = erase(snake_case__ , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root


def lowerCAmelCase_ ( ):
    '''simple docstring'''
    A : Union[str, Any] = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )

    A : Optional[int] = input()
    while args != "q":
        A : str = interact_treap(snake_case__ , snake_case__ )
        print(snake_case__ )
        A : Union[str, Any] = input()

    print('''good by!''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
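# Invariant note for the treap above: split/merge keep in-order traversal sorted by
# `value` (BST property) while `prior` stays heap-ordered (merge promotes the node
# with the smaller random priority), which bounds the expected depth at O(log n).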
311
0
'''simple docstring'''
import argparse
import os

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase : Any = 'src/transformers'
lowercase : str = 'docs/source/en/tasks'


def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
    '''simple docstring'''
    with open(snake_case__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        A : Union[str, Any] = f.readlines()
    # Find the start prompt.
    A : List[Any] = 0
    while not lines[start_index].startswith(snake_case__ ):
        start_index += 1
    start_index += 1

    A : List[str] = start_index
    while not lines[end_index].startswith(snake_case__ ):
        end_index += 1
    end_index -= 1

    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
lowercase : int = direct_transformers_import(TRANSFORMERS_PATH)

lowercase : str = {
    'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowercase : Optional[int] = {
    'summarization.md': ('nllb',),
    'translation.md': ('nllb',),
}


def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    A : int = TASK_GUIDE_TO_MODELS[task_guide]
    A : List[str] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(snake_case__ , set() )
    A : Union[str, Any] = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"


def lowerCAmelCase_ ( snake_case__ , snake_case__=False ):
    '''simple docstring'''
    A : Optional[int] = _find_text_in_file(
        filename=os.path.join(snake_case__ , snake_case__ ) ,
        start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' ,
        end_prompt='''<!--End of the generated tip-->''' ,
    )
    A : Optional[int] = get_model_list_for_task(snake_case__ )

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(snake_case__ , snake_case__ ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ''' to fix this.''' )


if __name__ == "__main__":
    lowercase : Dict = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    lowercase : List[Any] = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
370
'''simple docstring''' import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=sys.maxsize ) -> Union[str, Any]: """simple docstring""" A : Tuple = '''bilinear''' A : Optional[int] = max_size A : Dict = short_edge_length def __call__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : Tuple = [] for img in imgs: A, A : str = img.shape[:2] # later: provide list and randomly choose index for resize A : Union[str, Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img A : int = size * 1.0 / min(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if h < w: A, A : Tuple = size, scale * w else: A, A : str = scale * h, size if max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) > self.max_size: A : List[str] = self.max_size * 1.0 / max(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = newh * scale A : int = neww * scale A : List[str] = int(neww + 0.5 ) A : int = int(newh + 0.5 ) if img.dtype == np.uinta: A : Dict = Image.fromarray(SCREAMING_SNAKE_CASE ) A : Optional[Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) A : str = np.asarray(SCREAMING_SNAKE_CASE ) else: A : Dict = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw A : List[Any] = nn.functional.interpolate( SCREAMING_SNAKE_CASE , (newh, neww) , mode=self.interp_method , align_corners=SCREAMING_SNAKE_CASE ).squeeze(0 ) img_augs.append(SCREAMING_SNAKE_CASE ) return img_augs class A : def __init__( self , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Any = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) A : str = cfg.INPUT.FORMAT A : int = cfg.SIZE_DIVISIBILITY A : Optional[int] = cfg.PAD_VALUE A : Dict = cfg.INPUT.MAX_SIZE_TEST A : Optional[Any] = cfg.MODEL.DEVICE A : Dict = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) A : str = lambda SCREAMING_SNAKE_CASE : (x - self.pixel_mean) / self.pixel_std def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = tuple(max(SCREAMING_SNAKE_CASE ) for s in zip(*[img.shape for img in images] ) ) A : List[str] = [im.shape[-2:] for im in images] A : Optional[Any] = [ nn.functional.pad( SCREAMING_SNAKE_CASE , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ] return torch.stack(SCREAMING_SNAKE_CASE ), torch.tensor(SCREAMING_SNAKE_CASE ) def __call__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]: """simple docstring""" with torch.no_grad(): if not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : str = [images] if single_image: assert len(SCREAMING_SNAKE_CASE ) == 1 for i in range(len(SCREAMING_SNAKE_CASE ) ): if isinstance(images[i] , torch.Tensor ): images.insert(SCREAMING_SNAKE_CASE , images.pop(SCREAMING_SNAKE_CASE ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( SCREAMING_SNAKE_CASE , torch.as_tensor(img_tensorize(images.pop(SCREAMING_SNAKE_CASE ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest 
edge A : Tuple = torch.tensor([im.shape[:2] for im in images] ) A : Dict = self.aug(SCREAMING_SNAKE_CASE ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic A : Tuple = [self.normalizer(SCREAMING_SNAKE_CASE ) for x in images] # now pad them to do the following operations A, A : Optional[int] = self.pad(SCREAMING_SNAKE_CASE ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad A : Tuple = torch.true_divide(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' assert torch.isfinite(snake_case__ ).all(), "Box tensor contains infinite or NaN!" A, A : str = box_size tensor[:, 0].clamp_(min=0 , max=snake_case__ ) tensor[:, 1].clamp_(min=0 , max=snake_case__ ) tensor[:, 2].clamp_(min=0 , max=snake_case__ ) tensor[:, 3].clamp_(min=0 , max=snake_case__ )
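# Hypothetical usage sketch for the Preprocess pipeline above. The config is a
# stand-in with detectron2-style attributes the code reads; every value below
# is an illustrative assumption, not a real checkpoint default.
from types import SimpleNamespace

cfg = SimpleNamespace(
    INPUT=SimpleNamespace(MIN_SIZE_TEST=800, MAX_SIZE_TEST=1333, FORMAT="BGR"),
    SIZE_DIVISIBILITY=0,
    PAD_VALUE=0,
    MODEL=SimpleNamespace(
        DEVICE="cpu",
        PIXEL_MEAN=[102.9801, 115.9465, 122.7717],  # placeholder channel means
        PIXEL_STD=[1.0, 1.0, 1.0],
    ),
)

preprocess = Preprocess(cfg)
# Returns the padded image batch, the original (h, w) sizes, and the y/x scale
# factors needed to map predicted boxes back onto the original image.
images, sizes, scales_yx = preprocess("input.jpg", single_image=True)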
311
0
def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    # Count the set bits (popcount) of a non-negative integer.
    if snake_case__ < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif isinstance(snake_case__ , float ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return bin(snake_case__ ).count('''1''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
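# Sanity checks for the popcount helper above.
assert lowerCAmelCase_(25) == 3  # bin(25) == '0b11001'
assert lowerCAmelCase_(36) == 2  # bin(36) == '0b100100'
assert lowerCAmelCase_(0) == 0   # zero has no set bits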
371
'''simple docstring''' import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase : Tuple = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '--original_config_file', default=None, type=str, help='The YAML config file corresponding to the original architecture.', ) parser.add_argument( '--num_in_channels', default=None, type=int, help='The number of input channels. If `None` number of input channels will be automatically inferred.', ) parser.add_argument( '--scheduler_type', default='pndm', type=str, help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']', ) parser.add_argument( '--pipeline_type', default=None, type=str, help=( 'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'' '. If `None` pipeline will be automatically inferred.' ), ) parser.add_argument( '--image_size', default=None, type=int, help=( 'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2' ' Base. Use 768 for Stable Diffusion v2.' ), ) parser.add_argument( '--prediction_type', default=None, type=str, help=( 'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable' ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.' ), ) parser.add_argument( '--extract_ema', action='store_true', help=( 'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights' ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield' ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.' ), ) parser.add_argument( '--upcast_attention', action='store_true', help=( 'Whether the attention computation should always be upcasted. This is necessary when running stable' ' diffusion 2.1.' ), ) parser.add_argument( '--from_safetensors', action='store_true', help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.', ) parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') parser.add_argument( '--stable_unclip', type=str, default=None, required=False, help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.', ) parser.add_argument( '--stable_unclip_prior', type=str, default=None, required=False, help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.', ) parser.add_argument( '--clip_stats_path', type=str, help='Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.', required=False, ) parser.add_argument( '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.' ) parser.add_argument('--half', action='store_true', help='Save weights in half precision.') parser.add_argument( '--vae_path', type=str, default=None, required=False, help='Set to a path, hub id to an already converted vae to not convert it again.', ) lowercase : Tuple = parser.parse_args() lowercase : Union[str, Any] = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
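# Sketch of driving the same conversion programmatically; both file paths are
# placeholders, and only keyword arguments the script itself passes are used.
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="model.ckpt",              # placeholder checkpoint path
    original_config_file="v1-inference.yaml",  # placeholder YAML config
    scheduler_type="pndm",
    extract_ema=True,
)
pipe.to(torch_dtype=torch.float16)  # optional, mirrors the --half flag
pipe.save_pretrained("./converted-pipeline")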
311
0
'''simple docstring'''
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = 'docs/source/en/_toctree.yml'


def clean_model_doc_toc( model_doc ):
    '''simple docstring'''
    # Count how many times each `local` page is referenced.
    counts = defaultdict(int )
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
        if len(titles ) > 1:
            raise ValueError(
                F'{duplicate_key} is present several times in the documentation table of content at '
                '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
                '''others.''' )
        # Only add this once
        new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )

    # Sort
    return sorted(new_doc , key=lambda s: s["title"].lower() )


def check_model_doc( overwrite=False ):
    with open(PATH_TO_TOC , encoding='''utf-8''' ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]['''sections''']

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]['''sections''']

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc ) if '''sections''' in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc['''sections''']
        new_modality_doc = clean_model_doc_toc(old_modality_doc )
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]['''sections'''] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]['''sections'''] = model_doc
            content[api_idx]['''sections'''] = api_doc
            with open(PATH_TO_TOC , '''w''' , encoding='''utf-8''' ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
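# Calling the checker directly instead of via the CLI. Report-only mode raises
# ValueError when the model toc is unsorted or has conflicting duplicate titles.
check_model_doc(overwrite=False)
# Fix in place, equivalent to passing --fix_and_overwrite on the command line.
check_model_doc(overwrite=True)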
350
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase : str = datasets.utils.logging.get_logger(__name__) lowercase : Union[str, Any] = ['names', 'prefix'] lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowercase : List[Any] = ['encoding_errors', 'on_bad_lines'] lowercase : Any = ['date_format'] @dataclass class A ( datasets.BuilderConfig ): __magic_name__ = "," __magic_name__ = None __magic_name__ = "infer" __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = "." __magic_name__ = None __magic_name__ = '"' __magic_name__ = 0 __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = 0 __magic_name__ = True __magic_name__ = False __magic_name__ = None __magic_name__ = 10000 __magic_name__ = None __magic_name__ = "strict" __magic_name__ = "error" __magic_name__ = None def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if self.delimiter is not None: A : Optional[Any] = self.delimiter if self.column_names is not None: A : Optional[Any] = self.column_names @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == 
getattr(CsvConfig() , SCREAMING_SNAKE_CASE ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A ( datasets.ArrowBasedBuilder ): __magic_name__ = CsvConfig def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ): A : str = data_files if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = [files] A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A : Tuple = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[str] = [files] A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) ) return splits def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table: """simple docstring""" if self.config.features is not None: A : Optional[int] = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ): # cheaper cast A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return pa_table def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A : int = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ): A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ): A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE )}: {e}' ) 
raise
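# In normal use this builder is reached through datasets.load_dataset; the data
# file below is a placeholder, and extra keyword arguments flow into CsvConfig.
from datasets import load_dataset

dataset = load_dataset("csv", data_files="my_data.csv", sep=",", skiprows=0)
print(dataset["train"].features)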
311
0
'''simple docstring''' import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Optional[Any] = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() A : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) A : Tuple = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } A : List[str] = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16000, '''return_attention_mask''': False, '''do_normalize''': True, } A : Union[str, Any] = tempfile.mkdtemp() A : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A : Union[str, Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' ) # load decoder from hub A : Optional[int] = '''hf-internal-testing/ngram-beam-search-decoder''' def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Dict = self.add_kwargs_tokens_map.copy() kwargs.update(SCREAMING_SNAKE_CASE ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Any = self.get_tokenizer() A : Optional[Any] = self.get_feature_extractor() A : int = self.get_decoder() A : List[str] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) processor.save_pretrained(self.tmpdirname ) A : List[Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE ) # feature extractor 
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match A : Any = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(SCREAMING_SNAKE_CASE , '''include''' ): WavaVecaProcessorWithLM( tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.get_feature_extractor() A : int = self.get_tokenizer() A : Union[str, Any] = self.get_decoder() A : Any = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) A : int = floats_list((3, 1000) ) A : Any = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors='''np''' ) A : Dict = processor(SCREAMING_SNAKE_CASE , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[Any] = self.get_feature_extractor() A : Union[str, Any] = self.get_tokenizer() A : Optional[Any] = self.get_decoder() A : Tuple = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) A : int = '''This is a test string''' A : Optional[Any] = processor(text=SCREAMING_SNAKE_CASE ) A : Tuple = tokenizer(SCREAMING_SNAKE_CASE ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=(2, 10, 16) , SCREAMING_SNAKE_CASE=77 ) -> str: """simple docstring""" np.random.seed(SCREAMING_SNAKE_CASE ) return np.random.rand(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Dict = self.get_feature_extractor() A : Dict = self.get_tokenizer() A : Optional[int] = self.get_decoder() A : Dict = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) A : Optional[Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 ) A : Union[str, Any] = processor.decode(SCREAMING_SNAKE_CASE ) A : List[str] = decoder.decode_beams(SCREAMING_SNAKE_CASE )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) 
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : List[str] = self.get_feature_extractor() A : Optional[Any] = self.get_tokenizer() A : List[str] = self.get_decoder() A : Dict = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) A : Dict = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: A : Tuple = processor.batch_decode(SCREAMING_SNAKE_CASE ) else: with get_context(SCREAMING_SNAKE_CASE ).Pool() as pool: A : List[Any] = processor.batch_decode(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : int = list(SCREAMING_SNAKE_CASE ) with get_context('''fork''' ).Pool() as p: A : Tuple = decoder.decode_beams_batch(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : Tuple = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(SCREAMING_SNAKE_CASE , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(SCREAMING_SNAKE_CASE , decoded_processor.logit_score ) self.assertListEqual(SCREAMING_SNAKE_CASE , decoded_processor.lm_score ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : List[str] = self.get_feature_extractor() A : Union[str, Any] = self.get_tokenizer() A : Any = self.get_decoder() A : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) A : Optional[int] = self._get_dummy_logits() A : Optional[Any] = 15 A : str = -20.0 A : Union[str, Any] = -4.0 A : str = processor.batch_decode( SCREAMING_SNAKE_CASE , beam_width=SCREAMING_SNAKE_CASE , beam_prune_logp=SCREAMING_SNAKE_CASE , token_min_logp=SCREAMING_SNAKE_CASE , ) A : Any = decoded_processor_out.text A : List[Any] = list(SCREAMING_SNAKE_CASE ) with get_context('''fork''' ).Pool() as pool: A : Union[str, Any] = decoder.decode_beams_batch( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , beam_width=SCREAMING_SNAKE_CASE , beam_prune_logp=SCREAMING_SNAKE_CASE , token_min_logp=SCREAMING_SNAKE_CASE , ) A : int = [d[0][0] for d in decoded_decoder_out] A : Dict = [d[0][2] for d in decoded_decoder_out] A : Union[str, Any] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , SCREAMING_SNAKE_CASE ) self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) self.assertTrue(np.array_equal(SCREAMING_SNAKE_CASE , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9_474] , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[int] = self.get_feature_extractor() A : Optional[Any] = self.get_tokenizer() A : Any = self.get_decoder() A : 
Optional[int] = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) A : Dict = self._get_dummy_logits() A : Tuple = 2.0 A : Union[str, Any] = 5.0 A : Optional[int] = -20.0 A : Optional[int] = True A : List[str] = processor.batch_decode( SCREAMING_SNAKE_CASE , alpha=SCREAMING_SNAKE_CASE , beta=SCREAMING_SNAKE_CASE , unk_score_offset=SCREAMING_SNAKE_CASE , lm_score_boundary=SCREAMING_SNAKE_CASE , ) A : Optional[Any] = decoded_processor_out.text A : int = list(SCREAMING_SNAKE_CASE ) decoder.reset_params( alpha=SCREAMING_SNAKE_CASE , beta=SCREAMING_SNAKE_CASE , unk_score_offset=SCREAMING_SNAKE_CASE , lm_score_boundary=SCREAMING_SNAKE_CASE , ) with get_context('''fork''' ).Pool() as pool: A : List[Any] = decoder.decode_beams_batch( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) A : str = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , SCREAMING_SNAKE_CASE ) A : Optional[int] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) A : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key] A : Optional[Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() A : List[str] = os.listdir(SCREAMING_SNAKE_CASE ) A : Tuple = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Union[str, Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' ) A : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(SCREAMING_SNAKE_CASE ) A : int = processor.decoder.model_container[processor.decoder._model_key] A : Any = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() A : Optional[int] = os.listdir(SCREAMING_SNAKE_CASE ) A : Any = os.listdir(SCREAMING_SNAKE_CASE ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : int = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) A : List[str] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) A : Dict = floats_list((3, 1000) ) A : Dict = processor_wavaveca(SCREAMING_SNAKE_CASE , return_tensors='''np''' ) A : List[str] = processor_auto(SCREAMING_SNAKE_CASE , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) A : Union[str, Any] = self._get_dummy_logits() A : Dict = processor_wavaveca.batch_decode(SCREAMING_SNAKE_CASE ) A : str = processor_auto.batch_decode(SCREAMING_SNAKE_CASE ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Union[str, Any] = self.get_feature_extractor() A : Optional[Any] = self.get_tokenizer() A : int = self.get_decoder() A : Tuple = WavaVecaProcessorWithLM(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def __lowerCAmelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" A : Optional[int] = [d[key] for d in offsets] return retrieved_list def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) A : Dict = self._get_dummy_logits()[0] A : List[str] = processor.decode(SCREAMING_SNAKE_CASE , output_word_offsets=SCREAMING_SNAKE_CASE ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Optional[int] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) A : List[str] = self._get_dummy_logits() A : Any = processor.batch_decode(SCREAMING_SNAKE_CASE , 
output_word_offsets=SCREAMING_SNAKE_CASE ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(SCREAMING_SNAKE_CASE , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" import torch A : Any = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=SCREAMING_SNAKE_CASE ) A : int = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) ) A : str = iter(SCREAMING_SNAKE_CASE ) A : Optional[Any] = next(SCREAMING_SNAKE_CASE ) A : int = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) A : Union[str, Any] = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train A : Union[str, Any] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): A : List[str] = model(SCREAMING_SNAKE_CASE ).logits.cpu().numpy() A : Optional[int] = processor.decode(logits[0] , output_word_offsets=SCREAMING_SNAKE_CASE ) A : Union[str, Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate A : Dict = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] A : int = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(SCREAMING_SNAKE_CASE , '''word''' ) ) , SCREAMING_SNAKE_CASE ) self.assertEqual(''' '''.join(self.get_from_offsets(SCREAMING_SNAKE_CASE , '''word''' ) ) , output.text ) # output times A : Optional[int] = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE , '''start_time''' ) ) A : int = torch.tensor(self.get_from_offsets(SCREAMING_SNAKE_CASE , '''end_time''' ) ) # fmt: off A : int = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] ) A : List[str] = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=0.01 ) ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=0.01 ) )
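# Hedged inference sketch mirroring the slow test above; the checkpoint id is
# the one the test loads, and the audio array is a silent placeholder.
import numpy as np
import torch
from transformers import AutoProcessor, Wav2Vec2ForCTC

processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

speech = np.zeros(16000, dtype=np.float32)  # placeholder: one second of silence
inputs = processor(speech, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits
transcription = processor.batch_decode(logits.numpy()).text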
351
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : int = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class A ( __snake_case ): __magic_name__ = '''sew''' def __init__( self , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-5 , SCREAMING_SNAKE_CASE="group" , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , SCREAMING_SNAKE_CASE=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=0.05 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="mean" , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=256 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=2 , **SCREAMING_SNAKE_CASE , ) -> Tuple: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE , pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE ) A : Optional[Any] = hidden_size A : Any = feat_extract_norm A : Optional[int] = feat_extract_activation A : Tuple = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : List[str] = list(SCREAMING_SNAKE_CASE ) A : int = conv_bias A : List[Any] = num_conv_pos_embeddings A : Tuple = num_conv_pos_embedding_groups A : int = len(self.conv_dim ) A : Dict = num_hidden_layers A : Optional[int] = intermediate_size A : Any = squeeze_factor A : int = hidden_act A : str = num_attention_heads A : Dict = hidden_dropout A : Optional[Any] = attention_dropout A : List[str] = activation_dropout A : Union[str, Any] = feat_proj_dropout A : Union[str, Any] = final_dropout A : int = layerdrop A : Optional[Any] = layer_norm_eps A : Any = initializer_range A : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)' F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A : Optional[Any] = apply_spec_augment A : Optional[Any] = mask_time_prob A : Union[str, Any] = mask_time_length A : Optional[Any] = mask_time_min_masks A : str = mask_feature_prob A : Tuple = mask_feature_length A : Any = mask_feature_min_masks # ctc loss A : List[Any] = ctc_loss_reduction A : Dict = ctc_zero_infinity # sequence classification A : int = use_weighted_layer_sum A : Optional[int] = classifier_proj_size @property def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
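# Stand-alone use of the config; transformers documents the defaults as similar
# to the asapp/sew-tiny-100k checkpoint.
from transformers import SEWConfig, SEWModel

config = SEWConfig()
model = SEWModel(config)  # randomly initialized
print(config.hidden_size, config.num_hidden_layers)  # 768 12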
311
0
'''simple docstring''' import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline lowercase : Any = argparse.ArgumentParser('Stable Diffusion script with intel optimization', add_help=False) parser.add_argument('--dpm', action='store_true', help='Enable DPMSolver or not') parser.add_argument('--steps', default=None, type=int, help='Num inference steps') lowercase : List[Any] = parser.parse_args() lowercase : Union[str, Any] = 'cpu' lowercase : int = 'a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings' lowercase : str = 'path-to-your-trained-model' lowercase : Optional[Any] = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: lowercase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) lowercase : Dict = pipe.to(device) # to channels last lowercase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last) lowercase : Dict = pipe.vae.to(memory_format=torch.channels_last) lowercase : List[str] = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: lowercase : Any = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex lowercase : Optional[int] = torch.randn(2, 4, 64, 64) lowercase : Optional[int] = torch.rand(1) * 9_99 lowercase : Dict = torch.randn(2, 77, 7_68) lowercase : str = (sample, timestep, encoder_hidden_status) try: lowercase : Any = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: lowercase : str = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) lowercase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) lowercase : Optional[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute lowercase : Tuple = 6_66 lowercase : int = torch.Generator(device).manual_seed(seed) lowercase : Tuple = {'generator': generator} if args.steps is not None: lowercase : List[Any] = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): lowercase : Union[str, Any] = pipe(prompt, **generate_kwargs).images[0] # save image image.save('generated.png')
352
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Dict = SwinConfig() A : List[Any] = swin_name.split('''_''' ) A : Tuple = name_split[1] A : Union[str, Any] = int(name_split[4] ) A : str = int(name_split[3][-1] ) if model_size == "tiny": A : Optional[int] = 96 A : Optional[Any] = (2, 2, 6, 2) A : Any = (3, 6, 12, 24) elif model_size == "small": A : Optional[int] = 96 A : str = (2, 2, 18, 2) A : Tuple = (3, 6, 12, 24) elif model_size == "base": A : int = 128 A : Optional[Any] = (2, 2, 18, 2) A : List[str] = (4, 8, 16, 32) else: A : Dict = 192 A : Optional[Any] = (2, 2, 18, 2) A : Optional[Any] = (6, 12, 24, 48) if "in22k" in swin_name: A : Dict = 2_1841 else: A : str = 1000 A : List[str] = '''huggingface/label-files''' A : Any = '''imagenet-1k-id2label.json''' A : Any = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='''dataset''' ) , '''r''' ) ) A : str = {int(snake_case__ ): v for k, v in idalabel.items()} A : Tuple = idalabel A : Tuple = {v: k for k, v in idalabel.items()} A : Tuple = img_size A : Dict = num_classes A : Optional[Any] = embed_dim A : str = depths A : str = num_heads A : Optional[int] = window_size return config def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' if "patch_embed.proj" in name: A : Any = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: A : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: A : Optional[int] = '''encoder.''' + name if "attn.proj" in name: A : List[str] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: A : List[str] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: A : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: A : Tuple = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: A : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: A : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "norm.weight": A : Tuple = '''layernorm.weight''' if name == "norm.bias": A : Tuple = '''layernorm.bias''' if "head" in name: A : Any = name.replace('''head''' , '''classifier''' ) else: A : List[Any] = '''swin.''' + name return name def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): A : Dict = orig_state_dict.pop(snake_case__ ) if "mask" in key: continue elif "qkv" in key: A : Dict = key.split('''.''' ) A : Optional[int] = int(key_split[1] ) A : List[str] = int(key_split[3] ) A : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: A : Any = val[:dim, :] A : Dict = val[ dim : dim * 2, : ] A : List[str] = val[-dim:, :] else: A : Any = val[ :dim ] A : Optional[int] = val[ dim : dim * 2 ] A : Any = val[ -dim: ] else: A : str = val return orig_state_dict def lowerCAmelCase_ ( snake_case__ , snake_case__ ): '''simple docstring''' A : Tuple = timm.create_model(snake_case__ , pretrained=snake_case__ ) timm_model.eval() A : Optional[Any] = get_swin_config(snake_case__ ) A : Optional[int] = SwinForImageClassification(snake_case__ ) model.eval() A : List[str] = 
convert_state_dict(timm_model.state_dict() , snake_case__ ) model.load_state_dict(snake_case__ ) A : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' A : Any = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) ) A : List[Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) A : List[Any] = image_processor(images=snake_case__ , return_tensors='''pt''' ) A : Any = timm_model(inputs['''pixel_values'''] ) A : Optional[Any] = model(**snake_case__ ).logits assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 ) print(F'Saving model {swin_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(snake_case__ ) if __name__ == "__main__": lowercase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swin_name', default='swin_tiny_patch4_window7_224', type=str, help='Name of the Swin timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) lowercase : int = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
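# Direct call to the conversion entry point used at the bottom of the script;
# the dump path is a placeholder and the timm weights are downloaded on demand.
convert_swin_checkpoint("swin_tiny_patch4_window7_224", "./swin-tiny-patch4-window7-224")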
311
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowercase : Dict = logging.get_logger(__name__) lowercase : Tuple = { 'facebook/convnextv2-tiny-1k-224': 'https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json', } class A ( __snake_case , __snake_case ): __magic_name__ = '''convnextv2''' def __init__( self , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=224 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : Union[str, Any] = num_channels A : List[str] = patch_size A : str = num_stages A : Optional[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes A : Dict = [3, 3, 9, 3] if depths is None else depths A : Union[str, Any] = hidden_act A : List[Any] = initializer_range A : Optional[int] = layer_norm_eps A : List[Any] = drop_path_rate A : List[Any] = image_size A : Any = ['''stem'''] + [F'stage{idx}' for idx in range(1 , len(self.depths ) + 1 )] A : Optional[Any] = get_aligned_output_features_output_indices( out_features=SCREAMING_SNAKE_CASE , out_indices=SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
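# Minimal sketch of consuming this config with its model class.
from transformers import ConvNextV2Config, ConvNextV2Model

config = ConvNextV2Config()  # defaults resemble facebook/convnextv2-tiny-1k-224
model = ConvNextV2Model(config)  # randomly initialized
print(config.hidden_sizes)  # [96, 192, 384, 768]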
353
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : Optional[int] = logging.get_logger(__name__) lowercase : Tuple = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class A ( __snake_case ): __magic_name__ = '''pix2struct_text_model''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''hidden_size''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]: """simple docstring""" A : str = vocab_size A : List[str] = hidden_size A : List[Any] = d_kv A : Optional[Any] = d_ff A : Dict = num_layers A : Dict = num_heads A : Optional[int] = relative_attention_num_buckets A : Optional[Any] = relative_attention_max_distance A : Dict = dropout_rate A : Dict = layer_norm_epsilon A : Tuple = initializer_factor A : Union[str, Any] = use_cache A : int = eos_token_id A : List[str] = decoder_start_token_id # for backwards compatibility A : int = dense_act_fn super().__init__( pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Union[str, Any] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct_vision_model''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) A : List[str] = hidden_size A : Optional[Any] = patch_embed_hidden_size A : Union[str, Any] = d_ff A : Dict = dropout_rate A : str = num_hidden_layers A : Dict = num_attention_heads A : Tuple = initializer_range A : List[str] = initializer_factor A : Union[str, Any] = attention_dropout A : Tuple = layer_norm_eps A : int = dense_act_fn A : Optional[int] = seq_len A : Tuple = relative_attention_num_buckets A : str = relative_attention_max_distance A : Optional[Any] = d_kv @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE ) A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('''model_type''' ) == "pix2struct": A : Optional[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) class A ( __snake_case ): __magic_name__ = '''pix2struct''' __magic_name__ = True def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) if text_config is None: A : Dict = {} logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' ) if vision_config is None: A : str = {} logger.info('''vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.''' ) A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE ) A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE ) A : Any = self.text_config.decoder_start_token_id A : Any = self.text_config.pad_token_id A : Dict = self.text_config.eos_token_id A : Union[str, Any] = initializer_factor A : Tuple = initializer_range A : Optional[Any] = self.initializer_range A : int = self.initializer_range A : Tuple = is_vqa @classmethod def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Tuple = copy.deepcopy(self.__dict__ ) A : Dict = self.text_config.to_dict() A : int = self.vision_config.to_dict() A : Any = self.__class__.model_type return output
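# Composing the composite config from its two sub-configs, using the public
# transformers names for these classes.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig()
vision_config = Pix2StructVisionConfig()
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.hidden_size, config.vision_config.hidden_size)  # 768 768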
311
0
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen
from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging

logging.set_verbosity_info()
lowercase : Tuple = logging.get_logger(__name__)

lowercase : Union[str, Any] = ['model.decoder.embed_positions.weights']

def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if "emb" in name:
        A : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
    if "transformer" in name:
        A : List[Any] = name.replace('''transformer''' , '''model.decoder''' )
    if "cross_attention" in name:
        A : List[Any] = name.replace('''cross_attention''' , '''encoder_attn''' )
    if "linear1" in name:
        A : Optional[int] = name.replace('''linear1''' , '''fc1''' )
    if "linear2" in name:
        A : List[Any] = name.replace('''linear2''' , '''fc2''' )
    if "norm1" in name:
        A : List[Any] = name.replace('''norm1''' , '''self_attn_layer_norm''' )
    if "norm_cross" in name:
        A : Any = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
    if "norm2" in name:
        A : Dict = name.replace('''norm2''' , '''final_layer_norm''' )
    if "out_norm" in name:
        A : Union[str, Any] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
    if "linears" in name:
        A : Dict = name.replace('''linears''' , '''lm_heads''' )
    if "condition_provider.conditioners.description.output_proj" in name:
        A : List[Any] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
    return name

def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    A : List[Any] = list(state_dict.keys() )
    A : Optional[int] = {}
    for key in keys:
        A : List[str] = state_dict.pop(snake_case__ )
        A : str = rename_keys(snake_case__ )
        if "in_proj_weight" in key:
            # split fused qkv proj
            A : List[Any] = val[:hidden_size, :]
            A : Tuple = val[hidden_size : 2 * hidden_size, :]
            A : Dict = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            A : int = val
        else:
            A : Union[str, Any] = val
    return state_dict, enc_dec_proj_state_dict

def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if checkpoint == "small":
        # default config values
        A : int = 1024
        A : int = 24
        A : Dict = 16
    elif checkpoint == "medium":
        A : Optional[Any] = 1536
        A : str = 48
        A : List[str] = 24
    elif checkpoint == "large":
        A : int = 2048
        A : int = 48
        A : Dict = 32
    else:
        raise ValueError(F'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
    A : Optional[int] = MusicgenDecoderConfig(
        hidden_size=snake_case__ ,
        ffn_dim=hidden_size * 4 ,
        num_hidden_layers=snake_case__ ,
        num_attention_heads=snake_case__ ,
    )
    return config

@torch.no_grad()
def lowerCAmelCase_ ( snake_case__ , snake_case__=None , snake_case__=None , snake_case__="cpu" ):
    '''simple docstring'''
    A : Optional[Any] = MusicGen.get_pretrained(snake_case__ , device=snake_case__ )
    A : int = decoder_config_from_checkpoint(snake_case__ )

    A : int = fairseq_model.lm.state_dict()
    A : Union[str, Any] = rename_state_dict(
        snake_case__ , hidden_size=decoder_config.hidden_size )

    A : str = TaEncoderModel.from_pretrained('''t5-base''' )
    A : str = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    A : List[str] = MusicgenForCausalLM(snake_case__ ).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    A : str = decoder.load_state_dict(snake_case__ , strict=snake_case__ )

    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(snake_case__ )

    if len(snake_case__ ) > 0:
        raise ValueError(F'Missing key(s) in state_dict: {missing_keys}' )

    if len(snake_case__ ) > 0:
        raise ValueError(F'Unexpected key(s) in state_dict: {unexpected_keys}' )

    # init the composite model
    A : Union[str, Any] = MusicgenForConditionalGeneration(text_encoder=snake_case__ , audio_encoder=snake_case__ , decoder=snake_case__ )

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(snake_case__ )

    # check we can do a forward pass
    A : List[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    A : Any = input_ids.reshape(2 * 4 , -1 )

    with torch.no_grad():
        A : List[Any] = model(input_ids=snake_case__ , decoder_input_ids=snake_case__ ).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError('''Incorrect shape for logits''' )

    # now construct the processor
    A : Optional[Any] = AutoTokenizer.from_pretrained('''t5-base''' )
    A : List[str] = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
    A : List[Any] = MusicgenProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )

    # set the appropriate bos/pad token ids
    A : List[str] = 2048
    A : List[str] = 2048

    # set other default generation config params
    A : Any = int(30 * audio_encoder.config.frame_rate )
    A : Optional[Any] = True
    A : str = 3.0

    if pytorch_dump_folder is not None:
        Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
        logger.info(F'Saving model {checkpoint} to {pytorch_dump_folder}' )
        model.save_pretrained(snake_case__ )
        processor.save_pretrained(snake_case__ )

    if repo_id:
        logger.info(F'Pushing model {checkpoint} to {repo_id}' )
        model.push_to_hub(snake_case__ )
        processor.push_to_hub(snake_case__ )

if __name__ == "__main__":
    lowercase : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint',
        default='small',
        type=str,
        help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
    )
    parser.add_argument(
        '--pytorch_dump_folder',
        required=True,
        default=None,
        type=str,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    parser.add_argument(
        '--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
    )
    lowercase : Tuple = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
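# Illustrative sketch (stand-alone, toy sizes; not part of the record above).
# The conversion above splits a fused attention `in_proj_weight` of shape
# (3 * hidden, hidden) into separate query / key / value projections. The same
# slicing in isolation, with a check that the three pieces recompose the original:
import torch

hidden_size = 8  # toy value for illustration
fused_qkv = torch.randn(3 * hidden_size, hidden_size)

q_proj = fused_qkv[:hidden_size, :]                   # first third  -> query
k_proj = fused_qkv[hidden_size : 2 * hidden_size, :]  # middle third -> key
v_proj = fused_qkv[-hidden_size:, :]                  # last third   -> value

assert torch.equal(torch.cat([q_proj, k_proj, v_proj], dim=0), fused_qkv)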
354
'''simple docstring''' from __future__ import annotations def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : List[str] = 2 A : Dict = [] while i * i <= n: if n % i: i += 1 else: n //= i factors.append(snake_case__ ) if n > 1: factors.append(snake_case__ ) return factors if __name__ == "__main__": import doctest doctest.testmod()
311
0
from ..utils import DummyObject, requires_backends

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> str:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> int:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[str]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Any:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )

class A ( metaclass=__snake_case ):
    __magic_name__ = ['''sentencepiece''']

    def __init__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]:
        """simple docstring"""
        requires_backends(self , ['''sentencepiece'''] )
355
'''simple docstring'''
# Function to print upper half of diamond (pyramid)

def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    for i in range(0 , snake_case__ ):
        for _ in range(0 , n - i - 1 ):  # printing spaces
            print(''' ''' , end='''''' )
        for _ in range(0 , i + 1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()

def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    for i in range(snake_case__ , 0 , -1 ):
        for _ in range(snake_case__ , 0 , -1 ):  # printing stars
            print('''* ''' , end='''''' )
        print()
        for _ in range(n - i + 1 , 0 , -1 ):  # printing spaces
            print(''' ''' , end='''''' )

def lowerCAmelCase_ ( snake_case__ ):
    '''simple docstring'''
    if n <= 0:
        print(''' ... .... nothing printing :(''' )
        return
    floyd(snake_case__ )  # upper half
    reverse_floyd(snake_case__ )  # lower half

if __name__ == "__main__":
    print(R'| /\ | |- | |- |--| |\ /| |-')
    print(R'|/ \| |- |_ |_ |__| | \/ | |_')
    lowercase : List[str] = 1
    while K:
        lowercase : List[Any] = int(input('enter the number and , and see the magic : '))
        print()
        pretty_print(user_number)
        lowercase : Any = int(input('press 0 to exit... and 1 to continue...'))
    print('Good Bye...')
311
0
'''simple docstring'''
import unittest

import numpy as np

def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ):
    '''simple docstring'''
    A : List[Any] = np.shape(snake_case__ )
    A : Optional[Any] = np.shape(snake_case__ )
    A : Any = np.shape(snake_case__ )

    if shape_a[0] != shape_b[0]:
        A : Tuple = (
            '''Expected the same number of rows for A and B. '''
            F'Instead found A of size {shape_a} and B of size {shape_b}'
        )
        raise ValueError(snake_case__ )

    if shape_b[1] != shape_c[1]:
        A : List[Any] = (
            '''Expected the same number of columns for B and C. '''
            F'Instead found B of size {shape_b} and C of size {shape_c}'
        )
        raise ValueError(snake_case__ )

    A : str = pseudo_inv
    if a_inv is None:
        try:
            A : str = np.linalg.inv(snake_case__ )
        except np.linalg.LinAlgError:
            raise ValueError(
                '''Input matrix A is not invertible. Cannot compute Schur complement.''' )

    return mat_c - mat_b.T @ a_inv @ mat_b

class A ( unittest.TestCase ):
    def __lowerCAmelCase ( self ) -> None:
        """simple docstring"""
        A : Optional[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        A : Any = np.array([[0, 3], [3, 0], [2, 3]] )
        A : Optional[int] = np.array([[2, 1], [6, 3]] )

        A : int = schur_complement(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        A : Union[str, Any] = np.block([[a, b], [b.T, c]] )
        A : Any = np.linalg.det(SCREAMING_SNAKE_CASE )
        A : List[str] = np.linalg.det(SCREAMING_SNAKE_CASE )
        A : Tuple = np.linalg.det(SCREAMING_SNAKE_CASE )

        self.assertAlmostEqual(SCREAMING_SNAKE_CASE , det_a * det_s )

    def __lowerCAmelCase ( self ) -> None:
        """simple docstring"""
        A : Union[str, Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        A : str = np.array([[0, 3], [3, 0], [2, 3]] )
        A : Tuple = np.array([[2, 1], [6, 3]] )

        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            schur_complement(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

    def __lowerCAmelCase ( self ) -> None:
        """simple docstring"""
        A : Dict = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        A : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] )
        A : int = np.array([[2, 1, 3], [6, 3, 5]] )

        with self.assertRaises(SCREAMING_SNAKE_CASE ):
            schur_complement(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
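# Illustrative sketch (stand-alone toy matrices; not part of the record above).
# The record computes the Schur complement S = C - B^T A^{-1} B of the block matrix
# M = [[A, B], [B^T, C]] and tests the determinant identity det(M) = det(A) * det(S).
# The same check, written out directly:
import numpy as np

a = np.array([[1.0, 2.0], [2.0, 5.0]])  # symmetric, invertible block (det = 1)
b = np.array([[1.0], [0.0]])
c = np.array([[3.0]])

s = c - b.T @ np.linalg.inv(a) @ b      # Schur complement of A in M
m = np.block([[a, b], [b.T, c]])

assert np.isclose(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))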
356
'''simple docstring'''
# limitations under the License.

from typing import Optional, Tuple, Union

import torch

from diffusers import DiffusionPipeline, ImagePipelineOutput

class A ( __snake_case ):
    def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )

    @torch.no_grad()
    def __call__( self , SCREAMING_SNAKE_CASE = 1 , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 50 , SCREAMING_SNAKE_CASE = "pil" , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ) -> Union[ImagePipelineOutput, Tuple]:
        """simple docstring"""
        A : List[Any] = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) ,
            generator=SCREAMING_SNAKE_CASE ,
        )
        A : Optional[Any] = image.to(self.device )

        # set step values
        self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            A : Tuple = self.unet(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            A : List[Any] = self.scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample

        A : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
        A : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A : List[Any] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
311
0
'''simple docstring'''
import datasets

lowercase : Tuple = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'

lowercase : Union[str, Any] = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'

lowercase : Dict = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'

def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
    '''simple docstring'''
    return (preds == labels).mean()

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def __lowerCAmelCase ( self ) -> Optional[Any]:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
                    '''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
                } ) ,
            codebase_urls=[] ,
            reference_urls=[] ,
            format='''numpy''' ,
        )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
        """simple docstring"""
        return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}
357
'''simple docstring''' import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=50 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , ) -> str: """simple docstring""" A : Any = parent A : List[Any] = batch_size A : Union[str, Any] = seq_length A : Any = is_training A : int = use_input_mask A : Union[str, Any] = vocab_size A : List[Any] = hidden_size A : List[Any] = num_hidden_layers A : Optional[int] = num_attention_heads A : str = intermediate_size A : Tuple = hidden_act A : Union[str, Any] = hidden_dropout_prob A : Union[str, Any] = attention_probs_dropout_prob A : int = max_position_embeddings A : Optional[int] = initializer_range A : Any = use_labels A : Optional[int] = scope def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Optional[int] = None if self.use_input_mask: A : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A : Dict = self.get_config() return config, input_ids, input_mask, token_labels def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ) : Any = self.prepare_config_and_inputs() A : Tuple = True A : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) A : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : List[str] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : List[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE ) A : int = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) 
) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" A : List[str] = True A : Union[str, Any] = BertGenerationEncoder(config=SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , ) A : List[Any] = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> List[str]: """simple docstring""" A : Optional[Any] = True A : Tuple = True A : Optional[int] = BertGenerationDecoder(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval() # first forward pass A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE , ) A : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) A : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and A : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) A : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) A : str = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] A : Any = model( SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , )['''hidden_states'''][0] # select random slice A : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() A : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() A : str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 ) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE , ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationDecoder(SCREAMING_SNAKE_CASE ) model.to(SCREAMING_SNAKE_CASE ) model.eval() A : Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A, A, A : Optional[int] = self.prepare_config_and_inputs() A : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class A ( __snake_case , __snake_case , __snake_case , 
unittest.TestCase ): __magic_name__ = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () __magic_name__ = (BertGenerationDecoder,) if is_torch_available() else () __magic_name__ = ( {'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder} if is_torch_available() else {} ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : List[str] = BertGenerationEncoderTester(self ) A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A, A, A, A : Tuple = self.model_tester.prepare_config_and_inputs() A : str = '''bert''' self.model_tester.create_and_check_model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" ( ( A ), ( A ), ( A ), ( A ), ( A ), ( A ), ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() A : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*SCREAMING_SNAKE_CASE ) @slow def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[Any] = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = BertGenerationEncoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Optional[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Dict = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE ) A : Dict = torch.tensor( [[[0.1_775, 0.0_083, -0.0_321], [1.6_002, 0.1_287, 0.3_912], [2.1_473, 0.5_791, 0.6_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) ) @require_torch class A ( unittest.TestCase ): @slow def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : Optional[Any] = BertGenerationDecoder.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' ) A : Dict = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): A : Optional[Any] = model(SCREAMING_SNAKE_CASE )[0] A : Optional[Any] = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , 
SCREAMING_SNAKE_CASE ) A : Any = torch.tensor( [[[-0.5_788, -2.5_994, -3.7_054], [0.0_438, 4.7_997, 1.8_795], [1.5_862, 6.6_409, 4.4_638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
'''simple docstring''' import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal lowercase : str = datasets.utils.logging.get_logger(__name__) lowercase : Union[str, Any] = ['names', 'prefix'] lowercase : Union[str, Any] = ['warn_bad_lines', 'error_bad_lines', 'mangle_dupe_cols'] lowercase : List[Any] = ['encoding_errors', 'on_bad_lines'] lowercase : Any = ['date_format'] @dataclass class A ( datasets.BuilderConfig ): __magic_name__ = ''',''' __magic_name__ = None __magic_name__ = '''infer''' __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = False __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = False __magic_name__ = True __magic_name__ = None __magic_name__ = '''.''' __magic_name__ = None __magic_name__ = '''"''' __magic_name__ = 0 __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = None __magic_name__ = True __magic_name__ = True __magic_name__ = 0 __magic_name__ = True __magic_name__ = False __magic_name__ = None __magic_name__ = 10000 __magic_name__ = None __magic_name__ = '''strict''' __magic_name__ = '''error''' __magic_name__ = None def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" if self.delimiter is not None: A : Optional[Any] = self.delimiter if self.column_names is not None: A : Optional[Any] = self.column_names @property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : str = { '''sep''': self.sep, '''header''': self.header, '''names''': self.names, '''index_col''': self.index_col, '''usecols''': self.usecols, '''prefix''': self.prefix, '''mangle_dupe_cols''': self.mangle_dupe_cols, '''engine''': self.engine, '''converters''': self.converters, '''true_values''': self.true_values, '''false_values''': self.false_values, '''skipinitialspace''': self.skipinitialspace, '''skiprows''': self.skiprows, '''nrows''': self.nrows, '''na_values''': self.na_values, '''keep_default_na''': self.keep_default_na, '''na_filter''': self.na_filter, '''verbose''': self.verbose, '''skip_blank_lines''': self.skip_blank_lines, '''thousands''': self.thousands, '''decimal''': self.decimal, '''lineterminator''': self.lineterminator, '''quotechar''': self.quotechar, '''quoting''': self.quoting, '''escapechar''': self.escapechar, '''comment''': self.comment, '''encoding''': self.encoding, '''dialect''': self.dialect, '''error_bad_lines''': self.error_bad_lines, '''warn_bad_lines''': self.warn_bad_lines, '''skipfooter''': self.skipfooter, '''doublequote''': self.doublequote, '''memory_map''': self.memory_map, '''float_precision''': self.float_precision, '''chunksize''': self.chunksize, '''encoding_errors''': self.encoding_errors, '''on_bad_lines''': self.on_bad_lines, '''date_format''': self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if 
pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , SCREAMING_SNAKE_CASE ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class A ( datasets.ArrowBasedBuilder ): __magic_name__ = CsvConfig def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" if not self.config.data_files: raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}' ) A : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(SCREAMING_SNAKE_CASE , (str, list, tuple) ): A : str = data_files if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : int = [files] A : Optional[int] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] A : Tuple = [] for split_name, files in data_files.items(): if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : List[str] = [files] A : List[str] = [dl_manager.iter_files(SCREAMING_SNAKE_CASE ) for file in files] splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files} ) ) return splits def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> pa.Table: """simple docstring""" if self.config.features is not None: A : Optional[int] = self.config.features.arrow_schema if all(not require_storage_cast(SCREAMING_SNAKE_CASE ) for feature in self.config.features.values() ): # cheaper cast A : List[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=SCREAMING_SNAKE_CASE ) else: # more expensive cast; allows str <-> int/float or str to Audio for example A : int = table_cast(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) return pa_table def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" A : Union[str, Any] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str A : int = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(SCREAMING_SNAKE_CASE ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE ) ): A : Union[str, Any] = pd.read_csv(SCREAMING_SNAKE_CASE , iterator=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(SCREAMING_SNAKE_CASE ): A : Dict = pa.Table.from_pandas(SCREAMING_SNAKE_CASE ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE ) except ValueError as e: logger.error(F'Failed to read file \'{file}\' 
with error {type(SCREAMING_SNAKE_CASE )}: {e}' ) raise
358
'''simple docstring''' import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def lowerCAmelCase_ ( snake_case__ ): '''simple docstring''' A : Optional[int] = np.max(_outputs , axis=-1 , keepdims=snake_case__ ) A : Any = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=snake_case__ ) class A ( __snake_case ): __magic_name__ = '''sigmoid''' __magic_name__ = '''softmax''' __magic_name__ = '''none''' @add_end_docstrings( __snake_case , R''' return_all_scores (`bool`, *optional*, defaults to `False`): Whether to return all prediction scores or just the one of the predicted class. function_to_apply (`str`, *optional*, defaults to `"default"`): The function to apply to the model outputs in order to retrieve the scores. Accepts four different values: - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model has several labels, will apply the softmax function on the output. - `"sigmoid"`: Applies the sigmoid function on the output. - `"softmax"`: Applies the softmax function on the output. - `"none"`: Does not apply any function on the output. ''' , ) class A ( __snake_case ): __magic_name__ = False __magic_name__ = ClassificationFunction.NONE def __init__( self , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == '''tf''' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE="" , **SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" A : Optional[Any] = tokenizer_kwargs A : int = {} if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None: A : int = self.model.config.return_all_scores if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) or top_k is None: A : Union[str, Any] = top_k A : Dict = False elif return_all_scores is not None: warnings.warn( '''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of''' ''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , SCREAMING_SNAKE_CASE , ) if return_all_scores: A : Optional[int] = None else: A : Dict = 1 if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): A : Dict = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: A : int = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" A : str = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
A : Any = '''top_k''' not in kwargs if isinstance(args[0] , SCREAMING_SNAKE_CASE ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict[str, GenericTensor]: """simple docstring""" A : List[Any] = self.framework if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): return self.tokenizer(**SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) == 1 and isinstance(inputs[0] , SCREAMING_SNAKE_CASE ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( '''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a''' ''' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.''' ) return self.tokenizer(SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return self.model(**SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=True ) -> List[str]: """simple docstring""" if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: A : Optional[int] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: A : Any = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None: A : Optional[int] = self.model.config.function_to_apply else: A : Optional[int] = ClassificationFunction.NONE A : Any = model_outputs['''logits'''][0] A : List[Any] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: A : int = sigmoid(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.SOFTMAX: A : Any = softmax(SCREAMING_SNAKE_CASE ) elif function_to_apply == ClassificationFunction.NONE: A : int = outputs else: raise ValueError(F'Unrecognized `function_to_apply` argument: {function_to_apply}' ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} A : int = [ {'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(SCREAMING_SNAKE_CASE ) ] if not _legacy: dict_scores.sort(key=lambda SCREAMING_SNAKE_CASE : x["score"] , reverse=SCREAMING_SNAKE_CASE ) if top_k is not None: A : Union[str, Any] = dict_scores[:top_k] return dict_scores
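# Illustrative sketch (stand-alone; not part of the record above).
# The pipeline's softmax helper subtracts the row-wise max before exponentiating;
# this is the standard numerical-stability trick, since exp() of large logits
# overflows while the shifted version is mathematically identical:
import numpy as np

def stable_softmax(logits: np.ndarray) -> np.ndarray:
    maxes = np.max(logits, axis=-1, keepdims=True)  # shift so the largest logit is 0
    shifted_exp = np.exp(logits - maxes)            # every exp() argument is now <= 0
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

print(stable_softmax(np.array([1000.0, 1001.0])))  # works; naive exp(1000) overflows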
311
0
'''simple docstring''' import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class A ( __snake_case ): __magic_name__ = (DDIMParallelScheduler,) __magic_name__ = (('''eta''', 0.0), ('''num_inference_steps''', 50)) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Optional[Any] = { '''num_train_timesteps''': 1000, '''beta_start''': 0.0_001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''clip_sample''': True, } config.update(**SCREAMING_SNAKE_CASE ) return config def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : int = self.scheduler_classes[0] A : str = self.get_scheduler_config(**SCREAMING_SNAKE_CASE ) A : int = scheduler_class(**SCREAMING_SNAKE_CASE ) A : str = 10, 0.0 A : Dict = self.dummy_model() A : List[str] = self.dummy_sample_deter scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) for t in scheduler.timesteps: A : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) A : List[str] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample return sample def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE ) A : str = self.scheduler_classes[0] A : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) A : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) scheduler.set_timesteps(5 ) assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ): self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE , beta_end=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE ) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" for t in [1, 10, 49]: self.check_over_forward(time_step=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ): 
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ): self.check_over_forward(time_step=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Optional[int] = self.scheduler_classes[0] A : Any = self.get_scheduler_config() A : Any = scheduler_class(**SCREAMING_SNAKE_CASE ) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5 assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5 def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" A : Tuple = self.scheduler_classes[0] A : str = self.get_scheduler_config() A : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE ) A : int = 10, 0.0 scheduler.set_timesteps(SCREAMING_SNAKE_CASE ) A : List[Any] = self.dummy_model() A : List[str] = self.dummy_sample_deter A : Dict = self.dummy_sample_deter + 0.1 A : Optional[Any] = self.dummy_sample_deter - 0.1 A : Optional[int] = samplea.shape[0] A : Optional[int] = torch.stack([samplea, samplea, samplea] , dim=0 ) A : Tuple = torch.arange(SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE ) A : Tuple = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) ) A : List[Any] = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE ) A : int = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) A : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 1147.7904 ) < 1e-2 assert abs(result_mean.item() - 0.4_982 ) < 1e-3 def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Optional[Any] = self.full_loop() A : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) A : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 172.0_067 ) < 1e-2 assert abs(result_mean.item() - 0.223_967 ) < 1e-3 def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Union[str, Any] = self.full_loop(prediction_type='''v_prediction''' ) A : Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) A : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 52.5_302 ) < 1e-2 assert abs(result_mean.item() - 0.0_684 ) < 1e-3 def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Tuple = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE , beta_start=0.01 ) A : Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 149.8_295 ) < 1e-2 assert abs(result_mean.item() - 0.1_951 ) < 1e-3 def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Any = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE , beta_start=0.01 ) A : Optional[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) ) A : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) ) assert abs(result_sum.item() - 149.0_784 ) < 1e-2 assert abs(result_mean.item() - 0.1_941 ) < 1e-3
359
'''simple docstring'''
from itertools import zip_longest

import requests
from bsa import BeautifulSoup
from pandas import DataFrame

def lowerCAmelCase_ ( snake_case__ = "laptop" ):
    '''simple docstring'''
    A : Tuple = F'https://www.amazon.in/laptop/s?k={product}'
    A : Optional[int] = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36''',
        '''Accept-Language''': '''en-US, en;q=0.5''',
    }
    A : Any = BeautifulSoup(requests.get(snake_case__ , headers=snake_case__ ).text )
    # Initialize a Pandas dataframe with the column titles
    A : List[str] = DataFrame(
        columns=[
            '''Product Title''',
            '''Product Link''',
            '''Current Price of the product''',
            '''Product Rating''',
            '''MRP of the product''',
            '''Discount''',
        ] )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            '''div''' ,
            attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} ,
        ) ,
        soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) ,
    ):
        try:
            A : Optional[Any] = item.ha.text
            A : Union[str, Any] = '''https://www.amazon.in/''' + item.ha.a['''href''']
            A : Tuple = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
            try:
                A : int = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
            except AttributeError:
                A : Optional[int] = '''Not available'''
            try:
                A : str = (
                    '''₹'''
                    + item.find(
                        '''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
                )
            except AttributeError:
                A : List[Any] = ''''''
            try:
                A : Dict = float(
                    (
                        (
                            float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                            - float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
                        )
                        / float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
                    )
                    * 100 )
            except ValueError:
                A : str = float('''nan''' )
        except AttributeError:
            pass
        A : Union[str, Any] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        A : List[str] = ''' '''
        A : Optional[Any] = ''' '''
        data_frame.index += 1
    return data_frame

if __name__ == "__main__":
    lowercase : Union[str, Any] = 'headphones'
    get_amazon_product_data(product).to_csv(f'''Amazon Product Data for {product}.csv''')
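# Illustrative sketch (stand-alone toy values; not part of the record above).
# The scraper derives the discount percentage from the listed MRP and current price:
# discount % = (MRP - price) / MRP * 100. The same formula in isolation, including
# the record's string cleanup of the rupee sign and thousands separators:
def discount_percent(mrp_text: str, price_text: str) -> float:
    mrp = float(mrp_text.strip("₹").replace(",", ""))
    price = float(price_text.strip("₹").replace(",", ""))
    return (mrp - price) / mrp * 100

print(discount_percent("₹1,999", "₹1,499"))  # ~25.01, i.e. roughly 25% off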
311
0
'''simple docstring'''
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py

lowercase : Union[str, Any] = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'

lowercase : List[Any] = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'

lowercase : Any = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n \'bleu\': bleu score,\n \'precisions\': geometric mean of n-gram precisions,\n \'brevity_penalty\': brevity penalty,\n \'length_ratio\': ratio of lengths,\n \'translation_length\': translation_length,\n \'reference_length\': reference_length\nExamples:\n\n >>> predictions = [\n ... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample\n ... ["foo", "bar", "foobar"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)\n ... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric("bleu")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results["bleu"])\n 1.0\n'

@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def __lowerCAmelCase ( self ) -> Optional[int]:
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''' , id='''token''' ) , id='''sequence''' ) , id='''references''' ),
                } ) ,
            codebase_urls=['''https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'''] ,
            reference_urls=[
                '''https://en.wikipedia.org/wiki/BLEU''',
                '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
            ] ,
        )

    def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=False ) -> str:
        """simple docstring"""
        A : int = compute_bleu(
            reference_corpus=SCREAMING_SNAKE_CASE , translation_corpus=SCREAMING_SNAKE_CASE , max_order=SCREAMING_SNAKE_CASE , smooth=SCREAMING_SNAKE_CASE )
        (A) : str = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
360
'''simple docstring'''
import colorsys

from PIL import Image  # type: ignore


def get_distance(x, y, max_step):
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance):
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width=800,
    image_height=600,
    figure_center_x=-0.6,
    figure_center_y=0,
    figure_width=3.2,
    max_step=50,
    use_distance_color_coding=True,
):
    '''simple docstring'''
    img = Image.new('''RGB''', (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
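Two quick sanity checks of the escape-time function above (a sketch; the values follow directly from the fixed loop): a point inside the set never diverges and maps to 1.0, while c = 1+1j escapes on the very first step and maps to 0.0.

# Sanity checks for get_distance (50 steps -> denominator 49).
assert get_distance(0, 0, 50) == 1.0  # c = 0 stays bounded: step ends at 49
assert get_distance(1, 1, 50) == 0.0  # a*a + b*b = 10 > 4 already at step 0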
311
0
'''simple docstring''' import json import os import unittest from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors @require_tokenizers class A ( __snake_case , unittest.TestCase ): __magic_name__ = MvpTokenizer __magic_name__ = MvpTokenizerFast __magic_name__ = True __magic_name__ = filter_roberta_detectors def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" super().setUp() A : Any = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', ] A : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) ) A : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] A : Any = {'''unk_token''': '''<unk>'''} A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , **SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" return "lower newer", "lower newer" @cached_property def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' ) @cached_property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' ) @require_torch def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : str = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] A : List[str] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A : int = tokenizer(SCREAMING_SNAKE_CASE , max_length=len(SCREAMING_SNAKE_CASE ) , padding=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) A : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) # Test that special tokens are reset @require_torch def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A : str = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , 
return_tensors='''pt''' ) # check if input_ids are returned and no labels self.assertIn('''input_ids''' , SCREAMING_SNAKE_CASE ) self.assertIn('''attention_mask''' , SCREAMING_SNAKE_CASE ) self.assertNotIn('''labels''' , SCREAMING_SNAKE_CASE ) self.assertNotIn('''decoder_attention_mask''' , SCREAMING_SNAKE_CASE ) @require_torch def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = [ '''Summary of the text.''', '''Another summary.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A : List[Any] = tokenizer(text_target=SCREAMING_SNAKE_CASE , max_length=32 , padding='''max_length''' , return_tensors='''pt''' ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) @require_torch def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A : Tuple = tokenizer( ['''I am a small frog''' * 1024, '''I am a small frog'''] , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) self.assertEqual(batch.input_ids.shape , (2, 1024) ) @require_torch def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Tuple = ['''A long paragraph for summarization.'''] A : Optional[Any] = [ '''Summary of the text.''', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: A : int = tokenizer(SCREAMING_SNAKE_CASE , text_target=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ) A : Optional[Any] = inputs['''input_ids'''] A : Optional[int] = inputs['''labels'''] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" pass def __lowerCAmelCase ( self ) -> str: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ): A : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Union[str, Any] = self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) A : Dict = '''A, <mask> AllenNLP sentence.''' A : Optional[Any] = tokenizer_r.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE ) A : Dict = tokenizer_p.encode_plus(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , ) A : Any = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] ) A : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) 
self.assertSequenceEqual( SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] ) self.assertSequenceEqual( SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
361
'''simple docstring'''
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    'kernels/rwkv/wkv_cuda.cu',
    'kernels/rwkv/wkv_op.cpp',
    'kernels/deformable_detr/ms_deform_attn.h',
    'kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh',
    'models/graphormer/algos_graphormer.pyx',
]


def test_custom_files_are_present(transformers_path):
    '''simple docstring'''
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_lib', action='store_true', help='Whether to check the build or the actual package.')
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module('transformers')
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / 'build/lib/transformers'
    if not test_custom_files_are_present(transformers_path):
        raise ValueError('The built release does not contain the custom files. Fix this before going further!')
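The same check can also be driven directly from Python instead of the CLI flag above (a hypothetical snippet; the build path matches the script's own fallback):

from pathlib import Path

# Re-uses test_custom_files_are_present from the script above.
ok = test_custom_files_are_present(Path.cwd() / 'build/lib/transformers')
print('all custom files present' if ok else 'missing custom files')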
311
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer lowercase : str = logging.get_logger(__name__) lowercase : Any = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowercase : Tuple = { 'vocab_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt' ), 'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt', 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'squeezebert/squeezebert-uncased': ( 'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli': ( 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json' ), 'squeezebert/squeezebert-mnli-headless': ( 'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json' ), }, } lowercase : List[str] = { 'squeezebert/squeezebert-uncased': 5_12, 'squeezebert/squeezebert-mnli': 5_12, 'squeezebert/squeezebert-mnli-headless': 5_12, } lowercase : List[Any] = { 'squeezebert/squeezebert-uncased': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli': {'do_lower_case': True}, 'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True}, } class A ( __snake_case ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_INIT_CONFIGURATION __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = SqueezeBertTokenizer def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE="[UNK]" , SCREAMING_SNAKE_CASE="[SEP]" , SCREAMING_SNAKE_CASE="[PAD]" , SCREAMING_SNAKE_CASE="[CLS]" , SCREAMING_SNAKE_CASE="[MASK]" , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Optional[int]: """simple docstring""" super().__init__( SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , do_lower_case=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , mask_token=SCREAMING_SNAKE_CASE , tokenize_chinese_chars=SCREAMING_SNAKE_CASE , strip_accents=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) A : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get('''strip_accents''' , SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): A : Tuple = getattr(SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''' ) ) A : Optional[Any] = do_lower_case A : Any = strip_accents A : Tuple = tokenize_chinese_chars A : Dict = normalizer_class(**SCREAMING_SNAKE_CASE ) A : Optional[Any] = do_lower_case def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Tuple: """simple docstring""" A : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> List[int]: 
"""simple docstring""" A : int = [self.sep_token_id] A : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None ) -> Tuple[str]: """simple docstring""" A : Union[str, Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE ) return tuple(SCREAMING_SNAKE_CASE )
362
'''simple docstring''' from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class A : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=30 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=2 , ) -> List[str]: """simple docstring""" A : List[str] = parent A : Optional[Any] = batch_size A : Tuple = image_size A : int = patch_size A : Optional[int] = num_channels A : str = is_training A : List[Any] = use_labels A : Any = hidden_size A : Any = num_hidden_layers A : Optional[int] = num_attention_heads A : Any = intermediate_size A : List[str] = hidden_act A : str = hidden_dropout_prob A : Tuple = attention_probs_dropout_prob A : Any = type_sequence_label_size A : Optional[int] = initializer_range A : Dict = scope A : Tuple = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) A : List[Any] = (image_size // patch_size) ** 2 A : Tuple = num_patches + 2 def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A : Tuple = None if self.use_labels: A : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A : Tuple = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" A : Any = TFDeiTModel(config=SCREAMING_SNAKE_CASE ) A : str = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> int: """simple docstring""" A : Tuple = 
TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE ) A : List[Any] = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A : Optional[int] = 1 A : str = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE ) A : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Tuple = model(SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" A : str = self.type_sequence_label_size A : Optional[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Optional[Any] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images A : Optional[Any] = 1 A : List[str] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE ) A : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A : Optional[int] = self.prepare_config_and_inputs() A, A, A : Tuple = config_and_inputs A : Any = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class A ( __snake_case , __snake_case , unittest.TestCase ): __magic_name__ = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFDeiTModel, '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Tuple = TFDeiTModelTester(self ) A : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DeiT does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" pass def __lowerCAmelCase ( self ) -> str: """simple docstring""" A, A : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) A : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Dense ) ) def __lowerCAmelCase ( self ) -> int: """simple docstring""" A, A : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A : Any = model_class(SCREAMING_SNAKE_CASE ) A : str = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A : Union[str, Any] = [*signature.parameters.keys()] A : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Dict: """simple 
docstring""" A : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> Tuple: """simple docstring""" A : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE ) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters: del inputs_dict["labels"] return inputs_dict @slow def __lowerCAmelCase ( self ) -> str: """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A : List[str] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsNotNone(SCREAMING_SNAKE_CASE ) def lowerCAmelCase_ ( ): '''simple docstring''' A : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" A : Union[str, Any] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ) A : Dict = self.default_image_processor A : List[str] = prepare_img() A : Any = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='''tf''' ) # forward pass A : Optional[int] = model(**SCREAMING_SNAKE_CASE ) # verify the logits A : List[Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE ) A : str = tf.constant([-1.0_266, 0.1_912, -1.2_861] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 ) )
311
0
'''simple docstring'''
alphabet_size = 2_56
# Modulus to hash a string
modulus = 1_00_00_03


def rabin_karp(pattern, text):
    '''simple docstring'''
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp():
    '''simple docstring'''
    # Test 1)
    pattern = '''abc1abc12'''
    text_a = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text_a = '''alskfjaldsk23adsfabcabc'''
    assert rabin_karp('''abc1abc12''', '''alskfjaldsabc1abc1abc12k23adsfabcabc''') and not rabin_karp(pattern, text_a)

    # Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = '''Lü'''
    text = '''Lüsai'''
    assert rabin_karp(pattern, text)
    pattern = '''Lue'''
    assert not rabin_karp(pattern, text)
    print('''Success.''')


if __name__ == "__main__":
    test_rabin_karp()
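A worked instance of the rolling-hash update used in the second loop above (the arithmetic follows the module's base and modulus; the numbers are small enough to verify by hand):

# Rolling the window hash from "ab" to "bc" in text "abc" (window length 2).
base, mod = 2_56, 1_00_00_03
h_ab = (ord('a') * base + ord('b')) % mod  # hash built like p_hash above
power = base % mod                         # modulus_power for a length-2 window
h_bc = ((h_ab - ord('a') * power) * base + ord('c')) % mod
assert h_bc == (ord('b') * base + ord('c')) % mod  # equals the direct hash of "bc"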
363
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
    'tokenization_cpmant': ['CpmAntTokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_cpmant'] = [
        'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CpmAntForCausalLM',
        'CpmAntModel',
        'CpmAntPreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
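In practice the lazy-module pattern above means nothing heavy is imported until an attribute is first touched; a short sketch (assumes transformers is installed with torch available):

from transformers.models import cpmant

# The attribute access below is what triggers the real import of
# configuration_cpmant, per the _import_structure mapping above.
print(cpmant.CpmAntConfig.__name__)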
311
0
'''simple docstring''' import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=os.environ.get('LOGLEVEL', 'INFO').upper(), stream=sys.stdout, ) lowercase : List[str] = logging.getLogger(__name__) lowercase : List[Any] = {'facebook/bart-base': BartForConditionalGeneration} lowercase : int = {'facebook/bart-base': BartTokenizer} def lowerCAmelCase_ ( ): '''simple docstring''' A : Optional[Any] = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' ) parser.add_argument( '''--validation_file''' , type=snake_case__ , default=snake_case__ , help='''A csv or a json file containing the validation data.''' ) parser.add_argument( '''--max_length''' , type=snake_case__ , default=5 , help='''The maximum total input sequence length after tokenization.''' , ) parser.add_argument( '''--num_beams''' , type=snake_case__ , default=snake_case__ , help=( '''Number of beams to use for evaluation. This argument will be ''' '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.''' ) , ) parser.add_argument( '''--model_name_or_path''' , type=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=snake_case__ , ) parser.add_argument( '''--config_name''' , type=snake_case__ , default=snake_case__ , help='''Pretrained config name or path if not the same as model_name''' , ) parser.add_argument( '''--device''' , type=snake_case__ , default='''cpu''' , help='''Device where the model will be run''' , ) parser.add_argument('''--output_file_path''' , type=snake_case__ , default=snake_case__ , help='''Where to store the final ONNX file.''' ) A : str = parser.parse_args() return args def lowerCAmelCase_ ( snake_case__ , snake_case__="cpu" ): '''simple docstring''' A : int = model_dict[model_name].from_pretrained(snake_case__ ).to(snake_case__ ) A : Union[str, Any] = tokenizer_dict[model_name].from_pretrained(snake_case__ ) if model_name in ["facebook/bart-base"]: A : Dict = 0 A : Any = None A : Optional[Any] = 0 return huggingface_model, tokenizer def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' model.eval() A : List[str] = None A : List[str] = torch.jit.script(BARTBeamSearchGenerator(snake_case__ ) ) with torch.no_grad(): A : Any = '''My friends are cool but they eat too many carbs.''' A : List[str] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device ) A : int = model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=snake_case__ , max_length=snake_case__ , early_stopping=snake_case__ , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( snake_case__ , ( inputs['''input_ids'''], inputs['''attention_mask'''], num_beams, max_length, model.config.decoder_start_token_id, ) , snake_case__ , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''seq'''}, 
'''output_ids''': {0: '''batch''', 1: '''seq_out'''}, } , example_outputs=snake_case__ , ) logger.info('''Model exported to {}'''.format(snake_case__ ) ) A : Any = remove_dup_initializers(os.path.abspath(snake_case__ ) ) logger.info('''Deduplicated and optimized model written to {}'''.format(snake_case__ ) ) A : Optional[int] = onnxruntime.InferenceSession(snake_case__ ) A : Optional[int] = ort_sess.run( snake_case__ , { '''input_ids''': inputs['''input_ids'''].cpu().numpy(), '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(), '''num_beams''': np.array(snake_case__ ), '''max_length''': np.array(snake_case__ ), '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('''Model outputs from torch and ONNX Runtime are similar.''' ) logger.info('''Success.''' ) def lowerCAmelCase_ ( ): '''simple docstring''' A : Optional[int] = parse_args() A : Optional[int] = 5 A : List[str] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() A : Dict = torch.device(args.device ) A : Any = load_model_tokenizer(args.model_name_or_path , snake_case__ ) if model.config.decoder_start_token_id is None: raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' ) model.to(snake_case__ ) if args.max_length: A : Dict = args.max_length if args.num_beams: A : Dict = args.num_beams if args.output_file_path: A : Optional[int] = args.output_file_path else: A : Union[str, Any] = '''BART.onnx''' logger.info('''Exporting model to ONNX''' ) export_and_validate_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) if __name__ == "__main__": main()
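A sketch of driving the export end to end with this script's helpers rather than the CLI (hypothetical: the call names match how main() invokes them, but the positional order of the last three arguments is assumed from main()'s locals):

model, tokenizer = load_model_tokenizer('facebook/bart-base', 'cpu')
# assumed order: (model, tokenizer, onnx_file_path, num_beams, max_length)
export_and_validate_model(model, tokenizer, 'BART.onnx', 4, 5)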
364
'''simple docstring'''
from __future__ import annotations

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right


class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        """simple docstring"""
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """simple docstring"""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other) -> bool:
        """simple docstring"""
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        """simple docstring"""
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> Path | None:
        """simple docstring"""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        """simple docstring"""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node) -> Path:
        """simple docstring"""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print('------')

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2

        for elem in grid:
            print(elem)
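A small concrete check of the Manhattan heuristic and the node ordering above (a sketch using Node directly; greedy best-first sorts the open list purely by this value):

near = Node(pos_x=5, pos_y=5, goal_x=6, goal_y=6, g_cost=3, parent=None)
far = Node(pos_x=0, pos_y=0, goal_x=6, goal_y=6, g_cost=0, parent=None)
assert near.f_cost == 2 and far.f_cost == 12
assert near < far  # the node closer to the goal is expanded first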
311
0
'''simple docstring'''
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'

notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
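A sketch of how a placeholder map like black_avoid_patterns is typically applied before formatting doc snippets (the substitution loop itself is illustrative, not part of this file):

code = 'model = {model_class}.from_pretrained("x")'
for placeholder, fake_name in black_avoid_patterns.items():
    code = code.replace(placeholder, fake_name)
print(code)  # model = FakeModelClass.from_pretrained("x")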
365
'''simple docstring'''
import argparse
import os

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'


def _find_text_in_file(filename, start_prompt, end_prompt):
    '''simple docstring'''
    with open(filename, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
    'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
    'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
    'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
    'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
    'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
    'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
    'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
    'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
    'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
    'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
    'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
    'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
    'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
    'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
    'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}

# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
    'summarization.md': ('nllb',),
    'translation.md': ('nllb',),
}


def get_model_list_for_task(task_guide):
    '''simple docstring'''
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([F'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    '''simple docstring'''
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''',
        end_prompt='''<!--End of the generated tip-->''',
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), '''w''', encoding='''utf-8''', newline='''\n''') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                F'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ''' to fix this.'''
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
311
0
'''simple docstring''' import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 lowercase : Union[str, Any] = get_tests_dir('fixtures/dummy-config.json') class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : int = 0 def __lowerCAmelCase ( self ) -> Dict: """simple docstring""" self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec('''transformers.models.auto''' ) ) def __lowerCAmelCase ( self ) -> str: """simple docstring""" A : Union[str, Any] = AutoConfig.from_pretrained('''bert-base-uncased''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : List[Any] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : str = AutoConfig.for_model('''roberta''' ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. 
A : List[Any] = os.path.join(SCREAMING_SNAKE_CASE , '''fake-roberta''' ) os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) with open(os.path.join(SCREAMING_SNAKE_CASE , '''config.json''' ) , '''w''' ) as f: f.write(json.dumps({} ) ) A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertEqual(type(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" try: AutoConfig.register('''custom''' , SCREAMING_SNAKE_CASE ) # Wrong model type will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoConfig.register('''model''' , SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(SCREAMING_SNAKE_CASE ): AutoConfig.register('''bert''' , SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API A : Any = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(SCREAMING_SNAKE_CASE ) A : Tuple = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) self.assertIsInstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''bert-base is not a local folder and is not a valid model identifier''' ): A : Any = AutoConfig.from_pretrained('''bert-base''' ) def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): A : int = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE , revision='''aaaaaa''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" with self.assertRaisesRegex( SCREAMING_SNAKE_CASE , '''hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.''' , ): A : Dict = AutoConfig.from_pretrained('''hf-internal-testing/no-config-test-repo''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" with self.assertRaises(SCREAMING_SNAKE_CASE ): A : Optional[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(SCREAMING_SNAKE_CASE ): A : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE ) A : Dict = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) # Test config can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(SCREAMING_SNAKE_CASE ) A : Optional[int] = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(reloaded_config.__class__.__name__ , '''NewModelConfig''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: """simple docstring""" class A ( __snake_case ): __magic_name__ = '''new-model''' try: AutoConfig.register('''new-model''' , SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local A : str = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote code is disabled, we load the local one. A : Any = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(config.__class__.__name__ , '''NewModelConfigLocal''' ) # If remote is enabled, we load from the Hub A : List[Any] = AutoConfig.from_pretrained('''hf-internal-testing/test_dynamic_model''' , trust_remote_code=SCREAMING_SNAKE_CASE ) self.assertEqual(config.__class__.__name__ , '''NewModelConfig''' ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
366
'''simple docstring'''
def heaps(arr):
    '''simple docstring'''
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k, arr):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[k - 1], arr[i] = arr[i], arr[k - 1]
            else:  # k is odd
                arr[k - 1], arr[0] = arr[0], arr[k - 1]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
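For the fixed implementation above, three elements yield the six permutations in Heap's characteristic order; a quick check:

assert heaps([1, 2, 3]) == [
    (1, 2, 3), (2, 1, 3), (3, 1, 2),
    (1, 3, 2), (2, 3, 1), (3, 2, 1),
]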
311
0