Dataset schema:
- code: string, lengths 81 to 54k
- code_codestyle: int64, 0 to 721
- style_context: string, lengths 91 to 41.9k
- style_context_codestyle: int64, 0 to 699
- label: int64, 0 to 1
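A minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library; the repository id "user/code-style-corpus" is a placeholder, and the field interpretations in the comments are inferred from the schema above, not stated by the dump itself.

# Minimal sketch: load and inspect rows matching the schema above.
# Assumption: "user/code-style-corpus" is a placeholder hub id; substitute the real one.
from datasets import load_dataset

ds = load_dataset("user/code-style-corpus", split="train")

row = ds[0]
print(len(row["code"]))                # flattened Python source, 81-54k chars
print(row["code_codestyle"])           # style id for `code`, range 0-721
print(len(row["style_context"]))       # second source snippet, 91-41.9k chars
print(row["style_context_codestyle"])  # style id for the context, range 0-699
print(row["label"])                    # binary target, 0 or 1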
code:
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging

snake_case_ : Tuple = logging.get_logger(__name__)

snake_case_ : int = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}

class __a (lowerCamelCase ):
    __a : Union[str, Any] = "roc_bert"

    def __init__( self : Dict , __magic_name__ : Tuple=3_05_22 , __magic_name__ : Optional[Any]=7_68 , __magic_name__ : Dict=12 , __magic_name__ : Tuple=12 , __magic_name__ : Dict=30_72 , __magic_name__ : List[str]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[Any]=5_12 , __magic_name__ : Optional[Any]=2 , __magic_name__ : Union[str, Any]=0.0_2 , __magic_name__ : Any=1E-12 , __magic_name__ : List[Any]=True , __magic_name__ : Any=0 , __magic_name__ : Optional[Any]="absolute" , __magic_name__ : List[str]=None , __magic_name__ : Tuple=True , __magic_name__ : Optional[Any]=True , __magic_name__ : int=7_68 , __magic_name__ : Tuple=9_10 , __magic_name__ : str=5_12 , __magic_name__ : Optional[int]=2_48_58 , __magic_name__ : Optional[Any]=True , **__magic_name__ : Union[str, Any] , ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = vocab_size
        UpperCAmelCase_ : List[str] = max_position_embeddings
        UpperCAmelCase_ : Optional[Any] = hidden_size
        UpperCAmelCase_ : Any = num_hidden_layers
        UpperCAmelCase_ : int = num_attention_heads
        UpperCAmelCase_ : Optional[Any] = intermediate_size
        UpperCAmelCase_ : int = hidden_act
        UpperCAmelCase_ : int = hidden_dropout_prob
        UpperCAmelCase_ : str = attention_probs_dropout_prob
        UpperCAmelCase_ : Optional[int] = initializer_range
        UpperCAmelCase_ : Dict = type_vocab_size
        UpperCAmelCase_ : str = layer_norm_eps
        UpperCAmelCase_ : int = use_cache
        UpperCAmelCase_ : int = enable_pronunciation
        UpperCAmelCase_ : List[Any] = enable_shape
        UpperCAmelCase_ : int = pronunciation_embed_dim
        UpperCAmelCase_ : Union[str, Any] = pronunciation_vocab_size
        UpperCAmelCase_ : Optional[int] = shape_embed_dim
        UpperCAmelCase_ : Optional[Any] = shape_vocab_size
        UpperCAmelCase_ : List[str] = concat_input
        UpperCAmelCase_ : Dict = position_embedding_type
        UpperCAmelCase_ : Tuple = classifier_dropout
        super().__init__(pad_token_id=__magic_name__ , **__magic_name__ )
code_codestyle: 644
style_context:
'''simple docstring'''
import unittest

import torch
from torch import nn

from diffusers.models.activations import get_activation

class __a (unittest.TestCase ):
    def UpperCAmelCase__ ( self : Dict ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : Dict = get_activation('''swish''' )
        self.assertIsInstance(__magic_name__ , nn.SiLU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def UpperCAmelCase__ ( self : Tuple ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' )
        self.assertIsInstance(__magic_name__ , nn.SiLU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[int] = get_activation('''mish''' )
        self.assertIsInstance(__magic_name__ , nn.Mish )
        self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )

    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = get_activation('''gelu''' )
        self.assertIsInstance(__magic_name__ , nn.GELU )
        self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
style_context_codestyle: 644
label: 1
code:
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf

if is_tf_available():
    import tensorflow as tf

    from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

@require_tf
class __a (unittest.TestCase ):
    def UpperCAmelCase__ ( self : str , __magic_name__ : str ) -> Any:
        """simple docstring"""
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
                UpperCAmelCase_ : Optional[Any] = model_result['''result'''][batch_size][sequence_length]
                self.assertIsNotNone(__magic_name__ )

    def UpperCAmelCase__ ( self : Optional[int] ) -> int:
        """simple docstring"""
        UpperCAmelCase_ : Tuple = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase_ : str = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
        UpperCAmelCase_ : Dict = TensorFlowBenchmark(__magic_name__ )
        UpperCAmelCase_ : List[str] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[int] = '''sgugger/tiny-distilbert-classification'''
        UpperCAmelCase_ : int = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , only_pretrain_model=__magic_name__ , )
        UpperCAmelCase_ : Any = TensorFlowBenchmark(__magic_name__ )
        UpperCAmelCase_ : int = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ : List[str] = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase_ : Any = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        UpperCAmelCase_ : Optional[Any] = TensorFlowBenchmark(__magic_name__ )
        UpperCAmelCase_ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : int = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : Tuple = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
        UpperCAmelCase_ : str = TensorFlowBenchmark(__magic_name__ , [config] )
        UpperCAmelCase_ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCAmelCase__ ( self : Dict ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : Dict = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase_ : List[Any] = AutoConfig.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : str = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        UpperCAmelCase_ : Optional[Any] = TensorFlowBenchmark(__magic_name__ , [config] )
        UpperCAmelCase_ : Dict = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCAmelCase__ ( self : Any ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : Tuple = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase_ : Dict = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        UpperCAmelCase_ : Any = TensorFlowBenchmark(__magic_name__ )
        UpperCAmelCase_ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : str = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : Tuple = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        UpperCAmelCase_ : List[str] = TensorFlowBenchmark(__magic_name__ , [config] )
        UpperCAmelCase_ : str = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result )
        self.check_results_dict_not_empty(results.memory_train_result )

    def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Any = '''patrickvonplaten/t5-tiny-random'''
        UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__magic_name__ , )
        UpperCAmelCase_ : List[Any] = TensorFlowBenchmark(__magic_name__ , configs=[config] )
        UpperCAmelCase_ : Any = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , '''Cannot do xla on CPU.''' )
    def UpperCAmelCase__ ( self : int ) -> Optional[Any]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[int] = '''sshleifer/tiny-gpt2'''
        UpperCAmelCase_ : Tuple = TensorFlowBenchmarkArguments(
            models=[MODEL_ID] , training=__magic_name__ , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__magic_name__ , multi_process=__magic_name__ , )
        UpperCAmelCase_ : Dict = TensorFlowBenchmark(__magic_name__ )
        UpperCAmelCase_ : List[Any] = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result )
        self.check_results_dict_not_empty(results.memory_inference_result )

    def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : int = '''sshleifer/tiny-gpt2'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=__magic_name__ , save_to_csv=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__magic_name__ , '''inf_time.csv''' ) , inference_memory_csv_file=os.path.join(__magic_name__ , '''inf_mem.csv''' ) , env_info_csv_file=os.path.join(__magic_name__ , '''env.csv''' ) , multi_process=__magic_name__ , )
            UpperCAmelCase_ : Optional[int] = TensorFlowBenchmark(__magic_name__ )
            benchmark.run()
            self.assertTrue(Path(os.path.join(__magic_name__ , '''inf_time.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__magic_name__ , '''inf_mem.csv''' ) ).exists() )
            self.assertTrue(Path(os.path.join(__magic_name__ , '''env.csv''' ) ).exists() )

    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : str = '''sshleifer/tiny-gpt2'''

        def _check_summary_is_not_empty(__magic_name__ : str ):
            self.assertTrue(hasattr(__magic_name__ , '''sequential''' ) )
            self.assertTrue(hasattr(__magic_name__ , '''cumulative''' ) )
            self.assertTrue(hasattr(__magic_name__ , '''current''' ) )
            self.assertTrue(hasattr(__magic_name__ , '''total''' ) )

        with tempfile.TemporaryDirectory() as tmp_dir:
            UpperCAmelCase_ : str = TensorFlowBenchmarkArguments(
                models=[MODEL_ID] , inference=__magic_name__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__magic_name__ , '''log.txt''' ) , log_print=__magic_name__ , trace_memory_line_by_line=__magic_name__ , eager_mode=__magic_name__ , multi_process=__magic_name__ , )
            UpperCAmelCase_ : Dict = TensorFlowBenchmark(__magic_name__ )
            UpperCAmelCase_ : Dict = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary )
            self.assertTrue(Path(os.path.join(__magic_name__ , '''log.txt''' ) ).exists() )
code_codestyle: 644
style_context:
'''simple docstring'''
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging

snake_case_ : Union[str, Any] = logging.get_logger(__name__)

class __a (lowerCamelCase ):
    __a : Tuple = ["pixel_values"]

    def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None:
        """simple docstring"""
        UpperCAmelCase_ : int = do_resize
        UpperCAmelCase_ : Tuple = do_rescale
        UpperCAmelCase_ : List[Any] = size_divisor
        UpperCAmelCase_ : Any = resample
        super().__init__(**__magic_name__ )

    def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ )
        # Rounds the height and width down to the closest multiple of size_divisor
        UpperCAmelCase_ : Dict = height // size_divisor * size_divisor
        UpperCAmelCase_ : Dict = width // size_divisor * size_divisor
        UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
        return image

    def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )

    def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature:
        """simple docstring"""
        UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor
        UpperCAmelCase_ : Dict = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )

        UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ )

        if not valid_images(__magic_name__ ):
            raise ValueError('''Invalid image(s)''' )

        # All transformations expect numpy arrays.
        UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images]

        if do_resize:
            UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images]

        if do_rescale:
            UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images]

        UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]

        UpperCAmelCase_ : int = {'''pixel_values''': images}
        return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
style_context_codestyle: 644
label: 1
code:
'''simple docstring'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging

snake_case_ : str = logging.get_logger(__name__)

snake_case_ : int = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}

class __a (lowerCamelCase ):
    __a : Dict = "wavlm"

    def __init__( self : Dict , __magic_name__ : Any=32 , __magic_name__ : Dict=7_68 , __magic_name__ : int=12 , __magic_name__ : List[str]=12 , __magic_name__ : Optional[int]=30_72 , __magic_name__ : List[str]="gelu" , __magic_name__ : Dict=0.1 , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Any=0.0 , __magic_name__ : List[str]=0.1 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : int=1E-5 , __magic_name__ : str="group" , __magic_name__ : Tuple="gelu" , __magic_name__ : int=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __magic_name__ : Any=(5, 2, 2, 2, 2, 2, 2) , __magic_name__ : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , __magic_name__ : Optional[int]=False , __magic_name__ : Dict=1_28 , __magic_name__ : int=16 , __magic_name__ : Tuple=3_20 , __magic_name__ : Any=8_00 , __magic_name__ : Optional[int]=False , __magic_name__ : Any=True , __magic_name__ : Tuple=0.0_5 , __magic_name__ : Tuple=10 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : List[str]=0.0 , __magic_name__ : Tuple=10 , __magic_name__ : Optional[Any]=3_20 , __magic_name__ : Tuple=2 , __magic_name__ : Dict=0.1 , __magic_name__ : Any=1_00 , __magic_name__ : Dict=2_56 , __magic_name__ : Any=2_56 , __magic_name__ : Tuple=0.1 , __magic_name__ : Dict="mean" , __magic_name__ : Dict=False , __magic_name__ : List[str]=False , __magic_name__ : int=2_56 , __magic_name__ : Optional[int]=(5_12, 5_12, 5_12, 5_12, 15_00) , __magic_name__ : Union[str, Any]=(5, 3, 3, 1, 1) , __magic_name__ : str=(1, 2, 3, 1, 1) , __magic_name__ : Tuple=5_12 , __magic_name__ : Any=80 , __magic_name__ : Dict=0 , __magic_name__ : List[Any]=1 , __magic_name__ : int=2 , __magic_name__ : List[str]=False , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=2 , __magic_name__ : str=3 , __magic_name__ : Any=None , **__magic_name__ : Any , ) -> List[Any]:
        """simple docstring"""
        super().__init__(**__magic_name__ , pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ )
        UpperCAmelCase_ : Optional[Any] = hidden_size
        UpperCAmelCase_ : Union[str, Any] = feat_extract_norm
        UpperCAmelCase_ : Union[str, Any] = feat_extract_activation
        UpperCAmelCase_ : List[Any] = list(__magic_name__ )
        UpperCAmelCase_ : Any = list(__magic_name__ )
        UpperCAmelCase_ : List[str] = list(__magic_name__ )
        UpperCAmelCase_ : Tuple = conv_bias
        UpperCAmelCase_ : Any = num_buckets
        UpperCAmelCase_ : Union[str, Any] = max_bucket_distance
        UpperCAmelCase_ : Tuple = num_conv_pos_embeddings
        UpperCAmelCase_ : Optional[Any] = num_conv_pos_embedding_groups
        UpperCAmelCase_ : Union[str, Any] = len(self.conv_dim )
        UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
        UpperCAmelCase_ : List[Any] = intermediate_size
        UpperCAmelCase_ : Any = hidden_act
        UpperCAmelCase_ : List[Any] = num_attention_heads
        UpperCAmelCase_ : List[str] = hidden_dropout
        UpperCAmelCase_ : int = attention_dropout
        UpperCAmelCase_ : int = activation_dropout
        UpperCAmelCase_ : Optional[int] = feat_proj_dropout
        UpperCAmelCase_ : Any = final_dropout
        UpperCAmelCase_ : List[Any] = layerdrop
        UpperCAmelCase_ : Optional[int] = layer_norm_eps
        UpperCAmelCase_ : Optional[Any] = initializer_range
        UpperCAmelCase_ : Optional[int] = num_ctc_classes
        UpperCAmelCase_ : List[str] = vocab_size
        UpperCAmelCase_ : Tuple = do_stable_layer_norm
        UpperCAmelCase_ : List[Any] = use_weighted_layer_sum
        UpperCAmelCase_ : Dict = classifier_proj_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
                F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        UpperCAmelCase_ : Optional[int] = apply_spec_augment
        UpperCAmelCase_ : Optional[Any] = mask_time_prob
        UpperCAmelCase_ : Dict = mask_time_length
        UpperCAmelCase_ : Dict = mask_time_min_masks
        UpperCAmelCase_ : Optional[Any] = mask_feature_prob
        UpperCAmelCase_ : Union[str, Any] = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        UpperCAmelCase_ : str = num_codevectors_per_group
        UpperCAmelCase_ : List[Any] = num_codevector_groups
        UpperCAmelCase_ : str = contrastive_logits_temperature
        UpperCAmelCase_ : List[Any] = num_negatives
        UpperCAmelCase_ : List[str] = codevector_dim
        UpperCAmelCase_ : List[str] = proj_codevector_dim
        UpperCAmelCase_ : int = diversity_loss_weight

        # ctc loss
        UpperCAmelCase_ : List[Any] = ctc_loss_reduction
        UpperCAmelCase_ : Union[str, Any] = ctc_zero_infinity

        # adapter
        UpperCAmelCase_ : Dict = add_adapter
        UpperCAmelCase_ : Union[str, Any] = adapter_kernel_size
        UpperCAmelCase_ : str = adapter_stride
        UpperCAmelCase_ : Any = num_adapter_layers
        UpperCAmelCase_ : List[Any] = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        UpperCAmelCase_ : str = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        UpperCAmelCase_ : List[Any] = list(__magic_name__ )
        UpperCAmelCase_ : str = list(__magic_name__ )
        UpperCAmelCase_ : Dict = list(__magic_name__ )
        UpperCAmelCase_ : List[str] = xvector_output_dim

    @property
    def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]:
        """simple docstring"""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
code_codestyle: 644
style_context:
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 10, SCREAMING_SNAKE_CASE__ : int = 22 ) -> int:
    UpperCAmelCase_ : Optional[int] = range(1, SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : List[Any] = range(1, SCREAMING_SNAKE_CASE__ )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )

if __name__ == "__main__":
    print(f'''{solution(10, 22) = }''')
style_context_codestyle: 644
label: 1
code:
'''simple docstring'''
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel

    if is_vision_available():
        from transformers import MaskaFormerImageProcessor

if is_vision_available():
    from PIL import Image

class __a :
    def __init__( self : Union[str, Any] , __magic_name__ : Any , __magic_name__ : str=2 , __magic_name__ : str=True , __magic_name__ : int=False , __magic_name__ : Any=10 , __magic_name__ : Dict=3 , __magic_name__ : Tuple=32 * 8 , __magic_name__ : int=32 * 8 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Dict=64 , ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : Any = parent
        UpperCAmelCase_ : Optional[int] = batch_size
        UpperCAmelCase_ : List[Any] = is_training
        UpperCAmelCase_ : List[str] = use_auxiliary_loss
        UpperCAmelCase_ : Any = num_queries
        UpperCAmelCase_ : Tuple = num_channels
        UpperCAmelCase_ : Optional[int] = min_size
        UpperCAmelCase_ : Any = max_size
        UpperCAmelCase_ : str = num_labels
        UpperCAmelCase_ : Optional[int] = hidden_dim
        UpperCAmelCase_ : Dict = hidden_dim

    def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __magic_name__ )
        UpperCAmelCase_ : Dict = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__magic_name__ )
        UpperCAmelCase_ : str = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__magic_name__ ) > 0.5
        ).float()
        UpperCAmelCase_ : str = (torch.rand((self.batch_size, self.num_labels) , device=__magic_name__ ) > 0.5).long()
        UpperCAmelCase_ : List[str] = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : List[str] = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        UpperCAmelCase_ : Tuple = self.num_queries
        UpperCAmelCase_ : int = self.num_labels
        UpperCAmelCase_ : str = [1, 1, 1, 1]
        UpperCAmelCase_ : int = self.num_channels
        UpperCAmelCase_ : Union[str, Any] = 64
        UpperCAmelCase_ : Tuple = 1_28
        UpperCAmelCase_ : str = self.hidden_dim
        UpperCAmelCase_ : Union[str, Any] = self.hidden_dim
        UpperCAmelCase_ : List[Any] = self.hidden_dim
        return config

    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
        UpperCAmelCase_ : int = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def UpperCAmelCase__ ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = output.encoder_hidden_states
        UpperCAmelCase_ : str = output.pixel_decoder_hidden_states
        UpperCAmelCase_ : str = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__magic_name__ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__magic_name__ ) , config.decoder_layers )

    def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any]=False ) -> List[str]:
        """simple docstring"""
        with torch.no_grad():
            UpperCAmelCase_ : Dict = MaskaFormerModel(config=__magic_name__ )
            model.to(__magic_name__ )
            model.eval()
            UpperCAmelCase_ : List[str] = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
            UpperCAmelCase_ : Dict = model(__magic_name__ , output_hidden_states=__magic_name__ )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )

        if output_hidden_states:
            self.check_output_hidden_state(__magic_name__ , __magic_name__ )

    def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Tuple ) -> Any:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = MaskaFormerForUniversalSegmentation(config=__magic_name__ )
        model.to(__magic_name__ )
        model.eval()

        def comm_check_on_output(__magic_name__ : Optional[Any] ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            UpperCAmelCase_ : List[Any] = model(pixel_values=__magic_name__ , pixel_mask=__magic_name__ )
            UpperCAmelCase_ : Dict = model(__magic_name__ )
            comm_check_on_output(__magic_name__ )
            UpperCAmelCase_ : Dict = model(
                pixel_values=__magic_name__ , pixel_mask=__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )
            comm_check_on_output(__magic_name__ )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )

@require_torch
class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    __a : Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    __a : Union[str, Any] = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    __a : List[str] = False
    __a : Dict = False
    __a : List[str] = False
    __a : Any = False

    def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : List[str] = MaskaFormerModelTester(self )
        UpperCAmelCase_ : str = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ )

    def UpperCAmelCase__ ( self : List[str] ) -> str:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ ( self : Tuple ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )

    def UpperCAmelCase__ ( self : str ) -> Any:
        """simple docstring"""
        UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__magic_name__ )

    @unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
    def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
    def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason='''Mask2Former is not a generative model''' )
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        pass

    @unittest.skip(reason='''Mask2Former does not use token embeddings''' )
    def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def UpperCAmelCase__ ( self : Optional[int] ) -> str:
        """simple docstring"""
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def UpperCAmelCase__ ( self : str ) -> Any:
        """simple docstring"""
        pass

    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Union[str, Any] = model_class(__magic_name__ )
            UpperCAmelCase_ : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()]
            UpperCAmelCase_ : str = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __magic_name__ )

    @slow
    def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
        """simple docstring"""
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            UpperCAmelCase_ : str = MaskaFormerModel.from_pretrained(__magic_name__ )
            self.assertIsNotNone(__magic_name__ )

    def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : str = (self.model_tester.min_size,) * 2
        UpperCAmelCase_ : Tuple = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=__magic_name__ ),
            '''mask_labels''': torch.randn((2, 10, *size) , device=__magic_name__ ),
            '''class_labels''': torch.zeros(2 , 10 , device=__magic_name__ ).long(),
        }
        UpperCAmelCase_ : Optional[int] = self.model_tester.get_config()
        UpperCAmelCase_ : Any = MaskaFormerForUniversalSegmentation(__magic_name__ ).to(__magic_name__ )
        UpperCAmelCase_ : List[str] = model(**__magic_name__ )
        self.assertTrue(outputs.loss is not None )

    def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(__magic_name__ , **__magic_name__ , output_hidden_states=__magic_name__ )

    def UpperCAmelCase__ ( self : str ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Tuple = model_class(__magic_name__ ).to(__magic_name__ )
            UpperCAmelCase_ : Tuple = model(**__magic_name__ , output_attentions=__magic_name__ )
            self.assertTrue(outputs.attentions is not None )

    def UpperCAmelCase__ ( self : int ) -> int:
        """simple docstring"""
        if not self.model_tester.is_training:
            return

        UpperCAmelCase_ : int = self.all_model_classes[1]
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()

        UpperCAmelCase_ : List[str] = model_class(__magic_name__ )
        model.to(__magic_name__ )
        model.train()

        UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ ).loss
        loss.backward()

    def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = self.all_model_classes[1]
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()

        UpperCAmelCase_ : int = True
        UpperCAmelCase_ : List[Any] = True

        UpperCAmelCase_ : Union[str, Any] = model_class(__magic_name__ ).to(__magic_name__ )
        model.train()

        UpperCAmelCase_ : List[Any] = model(__magic_name__ , mask_labels=__magic_name__ , class_labels=__magic_name__ )

        UpperCAmelCase_ : List[str] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        UpperCAmelCase_ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        UpperCAmelCase_ : Optional[int] = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        UpperCAmelCase_ : str = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=__magic_name__ )

        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )

snake_case_ : Optional[Any] = 1E-4

def lowerCamelCase_ ( ) -> Any:
    UpperCAmelCase_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image

@require_vision
@slow
class __a (unittest.TestCase ):
    @cached_property
    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]:
        """simple docstring"""
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    def UpperCAmelCase__ ( self : int ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__magic_name__ )
        UpperCAmelCase_ : Any = self.default_image_processor
        UpperCAmelCase_ : int = prepare_img()
        UpperCAmelCase_ : str = image_processor(__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
        UpperCAmelCase_ : Union[str, Any] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__magic_name__ , (1, 3, 3_84, 3_84) )

        with torch.no_grad():
            UpperCAmelCase_ : int = model(**__magic_name__ )

        UpperCAmelCase_ : int = torch.tensor(
            [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(__magic_name__ )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )

        UpperCAmelCase_ : Dict = torch.tensor(
            [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(__magic_name__ )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )

        UpperCAmelCase_ : int = torch.tensor(
            [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(__magic_name__ )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )

    def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        UpperCAmelCase_ : List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__magic_name__ ).eval()
        UpperCAmelCase_ : int = self.default_image_processor
        UpperCAmelCase_ : Dict = prepare_img()
        UpperCAmelCase_ : Optional[int] = image_processor(__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
        UpperCAmelCase_ : List[str] = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__magic_name__ , (1, 3, 3_84, 3_84) )

        with torch.no_grad():
            UpperCAmelCase_ : Any = model(**__magic_name__ )

        # masks_queries_logits
        UpperCAmelCase_ : Dict = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        UpperCAmelCase_ : Union[str, Any] = [
            [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
            [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
            [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
        ]
        UpperCAmelCase_ : List[str] = torch.tensor(__magic_name__ ).to(__magic_name__ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )

        # class_queries_logits
        UpperCAmelCase_ : Optional[int] = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        UpperCAmelCase_ : Optional[Any] = torch.tensor(
            [
                [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
                [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
                [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
            ] ).to(__magic_name__ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __magic_name__ , atol=__magic_name__ ) )

    def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__magic_name__ ).eval()
        UpperCAmelCase_ : str = self.default_image_processor

        UpperCAmelCase_ : str = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )

        UpperCAmelCase_ : List[str] = inputs['''pixel_values'''].to(__magic_name__ )
        UpperCAmelCase_ : Union[str, Any] = [el.to(__magic_name__ ) for el in inputs['''mask_labels''']]
        UpperCAmelCase_ : List[Any] = [el.to(__magic_name__ ) for el in inputs['''class_labels''']]

        with torch.no_grad():
            UpperCAmelCase_ : List[str] = model(**__magic_name__ )

        self.assertTrue(outputs.loss is not None )
code_codestyle: 644
style_context:
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool

if TYPE_CHECKING:
    from PIL import Image

class __a (lowerCamelCase ):
    __a : int = "dandelin/vilt-b32-finetuned-vqa"
    __a : Any = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    __a : Any = "image_qa"
    __a : str = AutoProcessor
    __a : Any = AutoModelForVisualQuestionAnswering
    __a : List[Any] = ["image", "text"]
    __a : int = ["text"]

    def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ['''vision'''] )
        super().__init__(*__magic_name__ , **__magic_name__ )

    def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple:
        """simple docstring"""
        return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )

    def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]:
        """simple docstring"""
        with torch.no_grad():
            return self.model(**__magic_name__ ).logits

    def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item()
        return self.model.config.idalabel[idx]
style_context_codestyle: 644
label: 1
code:
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging

snake_case_ : Dict = logging.get_logger(__name__)

snake_case_ : Optional[Any] = {"vocab_file": "spiece.model"}

snake_case_ : Union[str, Any] = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

snake_case_ : Any = {
    "google/bigbird-roberta-base": 40_96,
    "google/bigbird-roberta-large": 40_96,
    "google/bigbird-base-trivia-itc": 40_96,
}

class __a (lowerCamelCase ):
    __a : int = VOCAB_FILES_NAMES
    __a : Any = PRETRAINED_VOCAB_FILES_MAP
    __a : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a : Any = ["input_ids", "attention_mask"]
    __a : List[int] = []

    def __init__( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : List[Any]="<unk>" , __magic_name__ : int="<s>" , __magic_name__ : int="</s>" , __magic_name__ : Dict="<pad>" , __magic_name__ : Dict="[SEP]" , __magic_name__ : Dict="[MASK]" , __magic_name__ : List[str]="[CLS]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Optional[int] , ) -> None:
        """simple docstring"""
        UpperCAmelCase_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token
        UpperCAmelCase_ : Optional[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
        UpperCAmelCase_ : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
        UpperCAmelCase_ : Any = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token
        UpperCAmelCase_ : List[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token
        UpperCAmelCase_ : Any = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ : List[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token

        UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , sep_token=__magic_name__ , mask_token=__magic_name__ , cls_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )

        UpperCAmelCase_ : Optional[Any] = vocab_file
        UpperCAmelCase_ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__magic_name__ )

    @property
    def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        return self.sp_model.get_piece_size()

    def UpperCAmelCase__ ( self : List[Any] ) -> int:
        """simple docstring"""
        UpperCAmelCase_ : Union[str, Any] = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        UpperCAmelCase_ : List[str] = self.__dict__.copy()
        UpperCAmelCase_ : Optional[Any] = None
        return state

    def __setstate__( self : int , __magic_name__ : List[str] ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : int = d

        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            UpperCAmelCase_ : Any = {}

        UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )

    def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> Optional[int]:
        """simple docstring"""
        return self.sp_model.piece_to_id(__magic_name__ )

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] ) -> str:
        """simple docstring"""
        UpperCAmelCase_ : str = self.sp_model.IdToPiece(__magic_name__ )
        return token

    def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : str ) -> Any:
        """simple docstring"""
        UpperCAmelCase_ : Any = []
        UpperCAmelCase_ : List[str] = ''''''
        UpperCAmelCase_ : Optional[int] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__magic_name__ ) + token
                UpperCAmelCase_ : Tuple = True
                UpperCAmelCase_ : List[str] = []
            else:
                current_sub_tokens.append(__magic_name__ )
                UpperCAmelCase_ : Any = False
        out_string += self.sp_model.decode(__magic_name__ )
        return out_string.strip()

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : bool = False , __magic_name__ : bool = None , __magic_name__ : bool = True , **__magic_name__ : List[Any] , ) -> str:
        """simple docstring"""
        UpperCAmelCase_ : Tuple = kwargs.pop('''use_source_tokenizer''' , __magic_name__ )
        UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ , skip_special_tokens=__magic_name__ )

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        UpperCAmelCase_ : int = []
        UpperCAmelCase_ : Dict = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(__magic_name__ ) )
                    UpperCAmelCase_ : List[Any] = []
                sub_texts.append(__magic_name__ )
            else:
                current_sub_text.append(__magic_name__ )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(__magic_name__ ) )

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            UpperCAmelCase_ : List[str] = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(__magic_name__ ) )
        else:
            UpperCAmelCase_ : Union[str, Any] = ''''''.join(__magic_name__ )

        UpperCAmelCase_ : Optional[int] = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            UpperCAmelCase_ : Any = self.clean_up_tokenization(__magic_name__ )
            return clean_text
        else:
            return text

    def UpperCAmelCase__ ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(__magic_name__ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase_ : Tuple = os.path.join(
            __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )

        if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , __magic_name__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(__magic_name__ , '''wb''' ) as fi:
                UpperCAmelCase_ : int = self.sp_model.serialized_model_proto()
                fi.write(__magic_name__ )

        return (out_vocab_file,)

    def UpperCAmelCase__ ( self : Tuple , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase_ : List[Any] = [self.cls_token_id]
        UpperCAmelCase_ : List[str] = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )

        if token_ids_a is None:
            return [1] + ([0] * len(__magic_name__ )) + [1]
        return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]

    def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[int] = [self.sep_token_id]
        UpperCAmelCase_ : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
code_codestyle: 644
'''simple docstring''' from collections.abc import Iterable from typing import Any class __a : def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = value UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def __repr__( self : List[str] ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class __a : def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = root def __str__( self : Any ) -> str: """simple docstring""" return str(self.root ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None: """simple docstring""" if new_children is not None: # reset its kids UpperCAmelCase_ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(__magic_name__ ): # If it is the right children UpperCAmelCase_ : Optional[Any] = new_children else: UpperCAmelCase_ : Optional[int] = new_children else: UpperCAmelCase_ : List[str] = new_children def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.root is None def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None: """simple docstring""" UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase_ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase_ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase_ : List[Any] = parent_node.left else: if parent_node.right is None: UpperCAmelCase_ : List[Any] = new_node break else: UpperCAmelCase_ : Union[str, Any] = parent_node.right UpperCAmelCase_ : Union[str, Any] = parent_node def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None: """simple docstring""" for value in values: self.__insert(__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None: """simple docstring""" if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase_ : str = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right return node def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: if self.root is None: return None UpperCAmelCase_ : Dict = self.root if not self.empty(): while node.right is not None: UpperCAmelCase_ : Any = node.right return node def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: UpperCAmelCase_ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase_ : Union[str, Any] = self.root while node.left is not None: UpperCAmelCase_ : Dict = node.left return node def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__magic_name__ , __magic_name__ ) elif node.left is None: # Has only right children self.__reassign_nodes(__magic_name__ , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__magic_name__ , node.left ) else: UpperCAmelCase_ : List[str] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase_ : Optional[int] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None: """simple docstring""" if node: self.inorder(__magic_name__ , node.left ) arr.append(node.value ) self.inorder(__magic_name__ , node.right ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int: """simple docstring""" UpperCAmelCase_ : list[int] = [] self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal return arr[k - 1] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]: UpperCAmelCase_ : Any = [] if curr_node is not None: UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def lowerCamelCase_ ( ) -> None: UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase_ : Tuple = BinarySearchTree() for i in testlist: t.insert(SCREAMING_SNAKE_CASE__ ) # Prints all the elements of the list in order traversal print(SCREAMING_SNAKE_CASE__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''', t.get_max().value ) # type: ignore 
print('''Min Value: ''', t.get_min().value ) # type: ignore for i in testlist: t.remove(SCREAMING_SNAKE_CASE__ ) print(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
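The kth_smallest helper above rests on one invariant worth spelling out: an inorder walk of a binary search tree visits values in ascending order, so the kth smallest element is simply entry k - 1 of that walk. A minimal standalone sketch of the idea (the names here are illustrative, not taken from the sample):

# Inorder traversal of a BST yields sorted values, so the kth smallest
# element is position k - 1 of the traversal.
class Node:
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None


def insert(root, value):
    if root is None:
        return Node(value)
    if value < root.value:
        root.left = insert(root.left, value)
    else:
        root.right = insert(root.right, value)
    return root


def inorder(node, out):
    if node is not None:
        inorder(node.left, out)
        out.append(node.value)
        inorder(node.right, out)
    return out


root = None
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    root = insert(root, v)

ordered = inorder(root, [])
assert ordered == sorted((8, 3, 6, 1, 10, 14, 13, 4, 7))
print(ordered[2])  # the 3rd smallest value -> 4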
'''simple docstring''' import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class __a : @staticmethod def UpperCAmelCase__ ( *__magic_name__ : Any , **__magic_name__ : Dict ) -> Dict: """simple docstring""" pass def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Image ) -> str: UpperCAmelCase_ : int = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class __a (unittest.TestCase ): __a : str = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , __magic_name__ : List[Any] ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = DepthEstimationPipeline(model=__magic_name__ , image_processor=__magic_name__ ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Tuple ) -> Dict: """simple docstring""" UpperCAmelCase_ : str = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , __magic_name__ ) import datasets UpperCAmelCase_ : int = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) UpperCAmelCase_ : List[str] = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , __magic_name__ , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" pass @slow @require_torch def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[Any] = '''Intel/dpt-large''' UpperCAmelCase_ : Tuple = pipeline('''depth-estimation''' , model=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) UpperCAmelCase_ : int = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 ) @require_torch def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" # This is highly irregular to have no small tests. 
self.skipTest('''There is no hf-internal-testing tiny model for either GLPN or DPT''' )
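For reference, a short sketch of how the pipeline exercised by these tests is used directly; the checkpoint and image URL are the ones the slow test relies on, and running this downloads the Intel/dpt-large weights:

# Direct use of the depth-estimation pipeline covered by the tests above.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

# As the tests assert, the result carries a depth image and a raw tensor.
print(type(outputs["depth"]))            # PIL.Image.Image
print(outputs["predicted_depth"].shape)  # torch.Size([1, H, W])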
'''simple docstring'''
import sys
import turtle


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float] ) -> tuple[float, float]:
    return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2


def lowerCamelCase_ (
    SCREAMING_SNAKE_CASE__ : tuple[float, float],
    SCREAMING_SNAKE_CASE__ : tuple[float, float],
    SCREAMING_SNAKE_CASE__ : tuple[float, float],
    SCREAMING_SNAKE_CASE__ : int,
) -> None:
    my_pen.up()
    my_pen.goto(vertexa[0], vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexa[0], vertexa[1] )
    my_pen.goto(vertexa[0], vertexa[1] )
    my_pen.goto(vertexa[0], vertexa[1] )
    if depth == 0:
        return
    triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
    triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )
    triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 )


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    snake_case_ : Any = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    snake_case_ : Tuple = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
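The recursion above only ever needs the midpoint of two vertices; a turtle-free sketch of that single step, using the same starting triangle (the function and variable names are illustrative):

# The midpoint step that drives the subdivision above, without turtle.
def midpoint(p1, p2):
    return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)


a, b, c = (-175.0, -125.0), (0.0, 175.0), (175.0, -125.0)
# One subdivision level replaces the triangle with three corner triangles.
print(midpoint(a, b))  # (-87.5, 25.0)
print(midpoint(a, c))  # (0.0, -125.0)
print(midpoint(b, c))  # (87.5, 25.0)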
'''simple docstring''' import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin snake_case_ : int = get_tests_dir("fixtures/test_sentencepiece_bpe.model") class __a (lowerCamelCase , unittest.TestCase ): __a : str = BartphoTokenizer __a : Any = False __a : Dict = True def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" super().setUp() UpperCAmelCase_ : Tuple = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] UpperCAmelCase_ : Optional[int] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : Optional[int] = {'''unk_token''': '''<unk>'''} UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''monolingual_vocab_file'''] ) with open(self.monolingual_vocab_file , '''w''' , encoding='''utf-8''' ) as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""" ) UpperCAmelCase_ : int = BartphoTokenizer(__magic_name__ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCAmelCase__ ( self : str , **__magic_name__ : Dict ) -> Optional[Any]: """simple docstring""" kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : Tuple ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = '''This is a là test''' UpperCAmelCase_ : List[Any] = '''This is a<unk><unk> test''' return input_text, output_text def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : str = BartphoTokenizer(__magic_name__ , self.monolingual_vocab_file , **self.special_tokens_map ) UpperCAmelCase_ : str = '''This is a là test''' UpperCAmelCase_ : int = '''▁This ▁is ▁a ▁l à ▁t est'''.split() UpperCAmelCase_ : str = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Dict = tokens + [tokenizer.unk_token] UpperCAmelCase_ : Dict = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case_ : List[str] = False class __a (unittest.TestCase ): pass @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = generator.manual_seed(0 ) UpperCAmelCase_ : Dict = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077''' UpperCAmelCase_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe.dual_guided( prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger ''' UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pipe.text_to_image( prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 
0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str:
    if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
        raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
    if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
        raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
    if num == 0:
        return "0b0"
    UpperCAmelCase_ : List[Any] = False
    if num < 0:
        UpperCAmelCase_ : Optional[Any] = True
        UpperCAmelCase_ : Union[str, Any] = -num
    UpperCAmelCase_ : list[int] = []
    while num > 0:
        binary.insert(0, num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(SCREAMING_SNAKE_CASE__ ) for e in binary )
    return "0b" + "".join(str(SCREAMING_SNAKE_CASE__ ) for e in binary )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
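A de-obfuscated sketch of the conversion, assuming the intent is to mirror Python's built-in bin(): convert the magnitude by repeated halving, then prefix the sign:

# Sign handling as above: convert abs(num), then prefix "-" if needed.
def to_binary(num: int) -> str:
    if num == 0:
        return "0b0"
    negative = num < 0
    num = abs(num)
    bits = []
    while num > 0:
        bits.insert(0, num % 2)
        num >>= 1
    prefix = "-0b" if negative else "0b"
    return prefix + "".join(str(bit) for bit in bits)


assert to_binary(10) == bin(10) == "0b1010"
assert to_binary(-10) == bin(-10) == "-0b1010"
assert to_binary(0) == bin(0) == "0b0"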
'''simple docstring'''
snake_case_ : int = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
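A pin table like this is typically consumed by a setup script; a minimal sketch of turning entries into pip-style requirement strings (the helper name and the trimmed-down table are illustrative):

# Illustrative consumer of the version table: pick the pins an extra needs.
deps = {
    "torch": "torch>=1.4",
    "transformers": "transformers>=4.25.1",
    "accelerate": "accelerate>=0.11.0",
}


def deps_list(*packages: str) -> list[str]:
    return [deps[package] for package in packages]


print(deps_list("torch", "transformers"))
# ['torch>=1.4', 'transformers>=4.25.1']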
'''simple docstring'''
from collections import defaultdict


class __a :
    def __init__( self : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict ) -> Optional[Any]:
        """simple docstring"""
        UpperCAmelCase_ : str = total  # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        UpperCAmelCase_ : str = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(__magic_name__ ) )
        ]
        UpperCAmelCase_ : str = defaultdict(__magic_name__ )  # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        UpperCAmelCase_ : Any = (1 << len(__magic_name__ )) - 1

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Any ) -> Optional[Any]:
        """simple docstring"""
        # if mask == self.final_mask, all persons have been assigned a task; return 1
        if mask == self.final_mask:
            return 1
        # if not everyone has a task yet and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if this case has already been considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't include this task in the arrangement
        UpperCAmelCase_ : int = self.count_ways_until(__magic_name__ , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and update the mask, then recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        UpperCAmelCase_ : Dict = total_ways_util
        return self.dp[mask][task_no]

    def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> Tuple:
        """simple docstring"""
        # Store the list of persons for each task
        for i in range(len(__magic_name__ ) ):
            for j in task_performed[i]:
                self.task[j].append(__magic_name__ )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )


if __name__ == "__main__":
    snake_case_ : List[str] = 5  # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    snake_case_ : List[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
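The mask bookkeeping in count_ways_until is easier to see in isolation: bit p of the mask records whether person p already holds a task, and final_mask is the all-ones pattern meaning everyone is assigned. A small worked example:

# Bit p of the mask records whether person p already holds a task.
num_persons = 3
final_mask = (1 << num_persons) - 1  # 0b111: everyone assigned

mask = 0b000
assert not mask & (1 << 1)  # person 1 is still free
mask |= 1 << 1              # assign a task to person 1
mask |= 1 << 0              # ...and one to person 0
print(bin(mask))            # 0b11
print(mask == final_mask)   # False: person 2 is still unassigned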
'''simple docstring''' import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __a (unittest.TestCase ): @property def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet UpperCAmelCase_ : Dict = KarrasVeScheduler() UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0] UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256''' UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = KarrasVeScheduler() UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
'''simple docstring''' import json import os import unittest from transformers import DebertaTokenizer, DebertaTokenizerFast from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class __a (lowerCamelCase , unittest.TestCase ): __a : int = DebertaTokenizer __a : Optional[int] = True __a : Union[str, Any] = DebertaTokenizerFast def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ : Tuple = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''[UNK]''', ] UpperCAmelCase_ : Tuple = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : Tuple = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] UpperCAmelCase_ : List[str] = {'''unk_token''': '''[UNK]'''} UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def UpperCAmelCase__ ( self : Optional[int] , **__magic_name__ : int ) -> str: """simple docstring""" kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = '''lower newer''' UpperCAmelCase_ : Optional[Any] = '''lower newer''' return input_text, output_text def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[Any] = self.get_tokenizer() UpperCAmelCase_ : Tuple = '''lower newer''' UpperCAmelCase_ : List[str] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] UpperCAmelCase_ : Tuple = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Any = tokens + [tokenizer.unk_token] UpperCAmelCase_ : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" UpperCAmelCase_ : str = self.get_tokenizer() UpperCAmelCase_ : Optional[int] = tokenizer('''Hello''' , '''World''' ) UpperCAmelCase_ : Any = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] self.assertListEqual(tokd['''token_type_ids'''] , __magic_name__ ) @slow def UpperCAmelCase__ ( self : Any ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) UpperCAmelCase_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=__magic_name__ ) UpperCAmelCase_ : Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode( '''sequence builders''' , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ ) UpperCAmelCase_ : List[Any] = tokenizer.encode( '''sequence 
builders''' , '''multi-sequence build''' , add_special_tokens=__magic_name__ , add_prefix_space=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__magic_name__ ) UpperCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode @slow def UpperCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" UpperCAmelCase_ : Optional[int] = [self.tokenizer_class] if self.test_rust_tokenizer: tokenizer_classes.append(self.rust_tokenizer_class ) for tokenizer_class in tokenizer_classes: UpperCAmelCase_ : List[str] = tokenizer_class.from_pretrained('''microsoft/deberta-base''' ) UpperCAmelCase_ : int = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , padding=__magic_name__ ) UpperCAmelCase_ : List[str] = [tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ ) for seq in encoding['''input_ids''']] # fmt: off UpperCAmelCase_ : Dict = { '''input_ids''': [ [1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2] ], '''token_type_ids''': [ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ] } # fmt: on UpperCAmelCase_ : str = [ '''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''', '''ALBERT incorporates two parameter reduction techniques''', '''The first one is a factorized embedding parameterization. 
By decomposing the large vocabulary''' ''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of''' ''' vocabulary embedding.''', ] self.assertDictEqual(encoding.data , __magic_name__ ) for expected, decoded in zip(__magic_name__ , __magic_name__ ): self.assertEqual(__magic_name__ , __magic_name__ )
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class __a (lowerCamelCase ):
    __a : List[Any] = "openai/whisper-base"
    __a : Optional[Any] = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    __a : Any = "transcriber"
    __a : str = WhisperProcessor
    __a : List[Any] = WhisperForConditionalGeneration
    __a : int = ["audio"]
    __a : Optional[Any] = ["text"]

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]:
        """simple docstring"""
        return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple:
        """simple docstring"""
        return self.model.generate(inputs=__magic_name__ )

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str:
        """simple docstring"""
        return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
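The tool above chains three steps: feature extraction, generation, decoding. A standalone sketch of that flow with the public transformers classes; it downloads the openai/whisper-base checkpoint, and the silent waveform is only a stand-in for real 16 kHz audio:

# encode -> forward -> decode, as wrapped by the tool above.
import numpy as np
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
generated = model.generate(inputs=features)
print(processor.batch_decode(generated, skip_special_tokens=True)[0])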
'''simple docstring'''
from ..utils import DummyObject, requires_backends


class __a (metaclass=lowerCamelCase ):
    __a : List[str] = ["flax", "transformers"]

    def __init__( self : List[str] , *__magic_name__ : int , **__magic_name__ : str ) -> str:
        """simple docstring"""
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def UpperCAmelCase__ ( cls : Optional[int] , *__magic_name__ : Optional[int] , **__magic_name__ : Optional[int] ) -> List[str]:
        """simple docstring"""
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def UpperCAmelCase__ ( cls : List[str] , *__magic_name__ : List[Any] , **__magic_name__ : Union[str, Any] ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(cls , ['''flax''', '''transformers'''] )


class __a (metaclass=lowerCamelCase ):
    __a : List[str] = ["flax", "transformers"]

    def __init__( self : List[Any] , *__magic_name__ : Optional[Any] , **__magic_name__ : str ) -> int:
        """simple docstring"""
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def UpperCAmelCase__ ( cls : Any , *__magic_name__ : str , **__magic_name__ : List[str] ) -> List[Any]:
        """simple docstring"""
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def UpperCAmelCase__ ( cls : Tuple , *__magic_name__ : Dict , **__magic_name__ : Optional[int] ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(cls , ['''flax''', '''transformers'''] )


class __a (metaclass=lowerCamelCase ):
    __a : Any = ["flax", "transformers"]

    def __init__( self : List[str] , *__magic_name__ : List[str] , **__magic_name__ : Optional[int] ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def UpperCAmelCase__ ( cls : Tuple , *__magic_name__ : Any , **__magic_name__ : List[str] ) -> List[str]:
        """simple docstring"""
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def UpperCAmelCase__ ( cls : Optional[Any] , *__magic_name__ : Optional[int] , **__magic_name__ : Any ) -> Optional[int]:
        """simple docstring"""
        requires_backends(cls , ['''flax''', '''transformers'''] )


class __a (metaclass=lowerCamelCase ):
    __a : Dict = ["flax", "transformers"]

    def __init__( self : List[str] , *__magic_name__ : List[str] , **__magic_name__ : Tuple ) -> Optional[Any]:
        """simple docstring"""
        requires_backends(self , ['''flax''', '''transformers'''] )

    @classmethod
    def UpperCAmelCase__ ( cls : Union[str, Any] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> str:
        """simple docstring"""
        requires_backends(cls , ['''flax''', '''transformers'''] )

    @classmethod
    def UpperCAmelCase__ ( cls : int , *__magic_name__ : Optional[Any] , **__magic_name__ : Union[str, Any] ) -> Any:
        """simple docstring"""
        requires_backends(cls , ['''flax''', '''transformers'''] )
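These stubs exist so the module can be imported even when flax and transformers are absent, failing only at use time. A minimal sketch of the guard they delegate to; the real requires_backends checks precomputed availability flags rather than find_spec, so this is an approximation:

# Raise a clear ImportError at use time when a backend is missing.
import importlib.util


def requires_backends(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {missing}")


class FlaxOnlyPipeline:
    def __init__(self):
        requires_backends(self, ["flax", "transformers"])


try:
    FlaxOnlyPipeline()  # raises if flax is not installed
except ImportError as err:
    print(err)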
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
    return abs(SCREAMING_SNAKE_CASE__ ) if a == 0 else greatest_common_divisor(b % a, SCREAMING_SNAKE_CASE__ )


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = y, x % y
    return abs(SCREAMING_SNAKE_CASE__ )


def lowerCamelCase_ ( ) -> Optional[int]:
    try:
        UpperCAmelCase_ : Optional[Any] = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        UpperCAmelCase_ : Optional[int] = int(nums[0] )
        UpperCAmelCase_ : List[Any] = int(nums[1] )
        print(
            F"""greatest_common_divisor({num_a}, {num_a}) = """
            F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}"""
        )
        print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''' )


if __name__ == "__main__":
    main()
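A short trace of the iterative variant for 12 and 18 makes the loop visible: each step replaces (x, y) with (y, x mod y) until y reaches 0, at which point x holds the GCD:

# Trace of the iterative Euclid loop above for gcd(12, 18).
x, y = 12, 18
while y:
    print(f"x={x}, y={y}")
    x, y = y, x % y
print(f"gcd = {abs(x)}")
# Output:
# x=12, y=18
# x=18, y=12
# x=12, y=6
# gcd = 6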
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 50 ) -> int:
    UpperCAmelCase_ : Tuple = [1] * (length + 1)
    for row_length in range(3, length + 1 ):
        for block_length in range(3, row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]


if __name__ == "__main__":
    print(f'''{solution() = }''')
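The recurrence here matches Project Euler problem 114: fill a row with blocks of length at least three, any two blocks separated by at least one empty unit. A readable restatement with small-value checks; the problem statement itself gives 17 arrangements for a row of length 7:

# Readable restatement of the recurrence above, with small-value checks.
def count_arrangements(length: int) -> int:
    ways = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways[row_length] += ways[row_length - block_start - block_length - 1]
            # the block of this length that sits flush against the row end
            ways[row_length] += 1
    return ways[length]


assert count_arrangements(3) == 2
assert count_arrangements(4) == 4
assert count_arrangements(7) == 17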
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str: """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : List[str] = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : List[str] = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : int = type_vocab_size UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : List[str] = scope UpperCAmelCase_ : List[str] = range_bbox def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase_ : List[str] = bbox[i, j, 3] UpperCAmelCase_ : Dict = bbox[i, j, 1] UpperCAmelCase_ : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase_ : List[str] = bbox[i, j, 2] UpperCAmelCase_ : Tuple = bbox[i, j, 0] UpperCAmelCase_ : Union[str, Any] = t UpperCAmelCase_ : int = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) 
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Tuple = { '''input_ids''': input_ids, 
'''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Tuple = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __a : Any = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __a : Union[str, Any] = False __a : int = False def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str: """simple docstring""" return True def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = LiltModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : Tuple = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_torch @slow class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ ) UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ ) UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ ) UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] ) UpperCAmelCase_ : List[str] = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , ) self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
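The loop that makes the random boxes legal in the model tester above is just a coordinate swap; isolated, with the same (x0, y0, x1, y1) layout (the function name is illustrative):

# Ensure every box satisfies x0 <= x1 and y0 <= y1, as the tester does.
def legalize_bbox(box: list[int]) -> list[int]:
    x0, y0, x1, y1 = box
    if x1 < x0:
        x0, x1 = x1, x0
    if y1 < y0:
        y0, y1 = y1, y0
    return [x0, y0, x1, y1]


print(legalize_bbox([5, 8, 2, 3]))  # [2, 3, 5, 8]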
'''simple docstring''' import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py snake_case_ : Optional[int] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. snake_case_ : Optional[int] = direct_transformers_import(PATH_TO_TRANSFORMERS) snake_case_ : Optional[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING snake_case_ : int = { # used to compute the property `self.chunk_length` "EncodecConfig": ["overlap"], # used as `self.bert_model = BertModel(config, ...)` "DPRConfig": True, # not used in modeling files, but it's an important information "FSMTConfig": ["langs"], # used internally in the configuration class file "GPTNeoConfig": ["attention_types"], # used internally in the configuration class file "EsmConfig": ["is_folding_model"], # used during training (despite we don't have training script for these models yet) "Mask2FormerConfig": ["ignore_value"], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) "OneFormerConfig": ["ignore_value", "norm"], # used during preprocessing and collation, see `collating_graphormer.py` "GraphormerConfig": ["spatial_pos_max"], # used internally in the configuration class file "T5Config": ["feed_forward_proj"], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally "MT5Config": ["feed_forward_proj", "tokenizer_class"], "UMT5Config": ["feed_forward_proj", "tokenizer_class"], # used internally in the configuration class file "LongT5Config": ["feed_forward_proj"], # used internally in the configuration class file "SwitchTransformersConfig": ["feed_forward_proj"], # having default values other than `1e-5` - we can't fix them without breaking "BioGptConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "GLPNConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "SegformerConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "CvtConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "PerceiverConfig": ["layer_norm_eps"], # used internally to calculate the feature size "InformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "AutoformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate `mlp_dim` "SamVisionConfig": ["mlp_ratio"], # For (head) training, but so far not implemented "ClapAudioConfig": ["num_classes"], # Not used, but providing useful information to users "SpeechT5HifiGanConfig": ["sampling_rate"], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { "CLIPSegConfig": True, "DeformableDetrConfig": True, "DetaConfig": True, "DinatConfig": True, "DonutSwinConfig": True, "EfficientFormerConfig": True, "FSMTConfig": True, "JukeboxConfig": 
True, "LayoutLMv2Config": True, "MaskFormerSwinConfig": True, "MT5Config": True, "NatConfig": True, "OneFormerConfig": True, "PerceiverConfig": True, "RagConfig": True, "SpeechT5Config": True, "SwinConfig": True, "Swin2SRConfig": True, "Swinv2Config": True, "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, "WhisperConfig": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) "JukeboxPriorConfig": True, # TODO: @Younes (for `is_decoder`) "Pix2StructTextConfig": True, } ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"""config.{attribute}""" in modeling_source or F"""getattr(config, \"{attribute}\"""" in modeling_source or F"""getattr(self.config, \"{attribute}\"""" in modeling_source ): UpperCAmelCase_ : int = True # Deal with multi-line cases elif ( re.search( RF"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""", SCREAMING_SNAKE_CASE__, ) is not None ): UpperCAmelCase_ : Any = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: UpperCAmelCase_ : Optional[int] = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files UpperCAmelCase_ : Union[str, Any] = [ '''bos_index''', '''eos_index''', '''pad_index''', '''unk_index''', '''mask_index''', '''image_size''', '''use_cache''', '''out_features''', '''out_indices''', ] UpperCAmelCase_ : Any = ['''encoder_no_repeat_ngram_size'''] # Special cases to be allowed UpperCAmelCase_ : Union[str, Any] = True if not attribute_used: UpperCAmelCase_ : List[str] = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: UpperCAmelCase_ : Tuple = True elif attribute in ["tie_word_embeddings"] and default_value is False: UpperCAmelCase_ : Any = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: UpperCAmelCase_ : Optional[int] = True elif attribute.endswith('''_token_id''' ): UpperCAmelCase_ : Any = True # configuration class specific cases if not case_allowed: UpperCAmelCase_ : str = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [] ) UpperCAmelCase_ : Tuple = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str: UpperCAmelCase_ : Any = dict(inspect.signature(config_class.__init__ ).parameters ) UpperCAmelCase_ : List[Any] = [x for x in list(signature.keys() ) if x not in ['''self''', '''kwargs''']] UpperCAmelCase_ : str = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long 
# as one variant is used, the test should pass UpperCAmelCase_ : Tuple = {} if len(config_class.attribute_map ) > 0: UpperCAmelCase_ : Optional[int] = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files UpperCAmelCase_ : Optional[int] = inspect.getsourcefile(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[int] = os.path.dirname(SCREAMING_SNAKE_CASE__ ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. UpperCAmelCase_ : Optional[int] = [os.path.join(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for fn in os.listdir(SCREAMING_SNAKE_CASE__ ) if fn.startswith('''modeling_''' )] # Get the source code strings UpperCAmelCase_ : List[str] = [] for path in modeling_paths: if os.path.isfile(SCREAMING_SNAKE_CASE__ ): with open(SCREAMING_SNAKE_CASE__ ) as fp: modeling_sources.append(fp.read() ) UpperCAmelCase_ : Dict = [] for config_param, default_value in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): # `attributes` here is all the variant names for `config_param` UpperCAmelCase_ : List[Any] = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): unused_attributes.append(attributes[0] ) return sorted(SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> List[str]: UpperCAmelCase_ : str = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) UpperCAmelCase_ : Any = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ), lambda SCREAMING_SNAKE_CASE__ : inspect.isclass(SCREAMING_SNAKE_CASE__ ) and issubclass(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) and inspect.getmodule(SCREAMING_SNAKE_CASE__ ) == inspect.getmodule(_config_class ), ) ] for config_class in config_classes_in_module: UpperCAmelCase_ : Optional[Any] = check_config_attributes_being_used(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > 0: UpperCAmelCase_ : Optional[int] = unused_attributes if len(SCREAMING_SNAKE_CASE__ ) > 0: UpperCAmelCase_ : Dict = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n''' for name, attributes in configs_with_unused_attributes.items(): error += F"""{name}: {attributes}\n""" raise ValueError(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": check_config_attributes()
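The multi-line branch above hinges on the whitespace class [ \t\v\n\r\f], which lets a getattr call split across several lines still count as attribute usage. A small demonstration:

# The whitespace class allows matching getattr calls split across lines.
import re

source = 'value = getattr(\n    self.config,\n    "hidden_size",\n    768,\n)'
attribute = "hidden_size"
pattern = (
    rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,"
    rf"[ \t\v\n\r\f]*\"{attribute}\""
)
print(re.search(pattern, source) is not None)  # True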
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ : str = logging.get_logger(__name__) snake_case_ : int = "▁" snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} snake_case_ : int = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } snake_case_ : Optional[Any] = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } snake_case_ : Dict = { "ernie-m-base": 5_14, "ernie-m-large": 5_14, } snake_case_ : Any = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class __a (lowerCamelCase ): __a : List[str] = ["input_ids"] __a : Union[str, Any] = VOCAB_FILES_NAMES __a : Tuple = PRETRAINED_INIT_CONFIGURATION __a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __a : Union[str, Any] = RESOURCE_FILES_NAMES def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
        UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__( do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
        UpperCAmelCase_ : Optional[Any] = do_lower_case
        UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt
        UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(__magic_name__ )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ )
        else:
            UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )}
        UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()}

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any:
        """simple docstring"""
        if text is None:
            return None
        UpperCAmelCase_ : str = self.tokenize(__magic_name__ )
        UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', []
        for i, ch in enumerate(__magic_name__ ):
            if ch in self.SP_CHAR_MAPPING:
                UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ )
            else:
                UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ )
            if self.is_whitespace(__magic_name__ ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(__magic_name__ ) )
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0
        if self.do_lower_case:
            UpperCAmelCase_ : Optional[int] = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                UpperCAmelCase_ : Tuple = token[1:]
            UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset
            UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            UpperCAmelCase_ : int = end
        return token_mapping

    @property
    def UpperCAmelCase__ ( self : Any ) -> Any:
        """simple docstring"""
        return len(self.vocab )

    def UpperCAmelCase__ ( self : List[Any] ) -> int:
        """simple docstring"""
        return dict(self.vocab , **self.added_tokens_encoder )

    def __getstate__( self : str ) -> Any:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = self.__dict__.copy()
        UpperCAmelCase_ : Optional[Any] = None
        return state

    def __setstate__( self : str , __magic_name__ : Any ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : Dict = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            UpperCAmelCase_ : int = {}
        UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )

    def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]:
        """simple docstring"""
        return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) )

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]:
        """simple docstring"""
        if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
            UpperCAmelCase_ : Dict = True
        if self.sp_model_kwargs.get('''alpha''' ) is not None:
            UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' )
        if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
            UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' )
        if not enable_sampling:
            UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ )
        else:
            UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ )
        UpperCAmelCase_ : List[Any] = []
        for pi, piece in enumerate(__magic_name__ ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0:
                    new_pieces.append(__magic_name__ )
                    continue
                else:
                    continue
            UpperCAmelCase_ : List[str] = 0
            for i, chunk in enumerate(__magic_name__ ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(__magic_name__ )
                    UpperCAmelCase_ : List[Any] = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    UpperCAmelCase_ : List[str] = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    UpperCAmelCase_ : str = i
            if len(__magic_name__ ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces

    def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
        return out_string

    def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ )
        UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip()
        return out_string

    def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )

    def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]:
        """simple docstring"""
        return self.reverse_vocab.get(__magic_name__ , self.unk_token )

    def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any:
        """simple docstring"""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id]
        UpperCAmelCase_ : List[Any] = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep

    def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int:
        """simple docstring"""
        if offset_mapping_a is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]:
        """simple docstring"""
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]
        return [1] + ([0] * len(__magic_name__ )) + [1]

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_a is None:
            # [CLS] X [SEP]
            return (len(__magic_name__ ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3)

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple:
        """simple docstring"""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str:
        """simple docstring"""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict:
        """simple docstring"""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]:
        """simple docstring"""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(__magic_name__ ) == 1:
            UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ )
            if cat == "Zs":
                return True
        return False

    def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = {}
        with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(__magic_name__ ):
                UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' )
                UpperCAmelCase_ : Dict = int(__magic_name__ )
        return token_to_idx

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        UpperCAmelCase_ : Union[str, Any] = 0
        if os.path.isdir(__magic_name__ ):
            UpperCAmelCase_ : Any = os.path.join(
                __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    UpperCAmelCase_ : Dict = token_index
                writer.write(token + '''\n''' )
                index += 1
        UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' )
        with open(__magic_name__ , '''wb''' ) as fi:
            UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto()
            fi.write(__magic_name__ )
        return (vocab_file,)
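
# ---------------------------------------------------------------------------
# Illustrative trace (added; not part of the source record). With hypothetical
# ids cls_token_id = 0 and sep_token_id = 2, the special-token layout built by
# the methods above works out as follows for token_ids_a = [5, 6] and
# token_ids_b = [7]:
#
#     pair inputs          -> [0, 5, 6, 2, 2, 7, 2]   # [CLS] A [SEP] [SEP] B [SEP]
#     token_type_ids       -> [0, 0, 0, 1, 1, 1, 1]   # [0] * (len_a + 1) + [1] * (len_b + 3)
#     special_tokens_mask  -> [1, 0, 0, 1, 1, 0, 1]
# ---------------------------------------------------------------------------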
644
1
'''simple docstring'''
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
snake_case_ : List[str] = logging.get_logger(__name__)

snake_case_ : List[Any] = {
    "b0": efficientnet.EfficientNetBa,
    "b1": efficientnet.EfficientNetBa,
    "b2": efficientnet.EfficientNetBa,
    "b3": efficientnet.EfficientNetBa,
    "b4": efficientnet.EfficientNetBa,
    "b5": efficientnet.EfficientNetBa,
    "b6": efficientnet.EfficientNetBa,
    "b7": efficientnet.EfficientNetBa,
}

snake_case_ : Optional[Any] = {
    "b0": {
        "hidden_dim": 12_80,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 2_24,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 12_80,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 2_40,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 14_08,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 2_60,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 15_36,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 3_00,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 17_92,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 3_80,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 20_48,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 4_56,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 23_04,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 5_28,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 25_60,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 6_00,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str ) -> Dict:
    UpperCAmelCase_ : int = EfficientNetConfig()
    UpperCAmelCase_ : str = CONFIG_MAP[model_name]['''hidden_dim''']
    UpperCAmelCase_ : Any = CONFIG_MAP[model_name]['''width_coef''']
    UpperCAmelCase_ : Tuple = CONFIG_MAP[model_name]['''depth_coef''']
    UpperCAmelCase_ : List[Any] = CONFIG_MAP[model_name]['''image_size''']
    UpperCAmelCase_ : int = CONFIG_MAP[model_name]['''dropout_rate''']
    UpperCAmelCase_ : Optional[int] = CONFIG_MAP[model_name]['''dw_padding''']

    UpperCAmelCase_ : int = '''huggingface/label-files'''
    UpperCAmelCase_ : Dict = '''imagenet-1k-id2label.json'''
    UpperCAmelCase_ : Union[str, Any] = 1000
    UpperCAmelCase_ : Tuple = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, repo_type='''dataset''' ), '''r''' ) )
    UpperCAmelCase_ : str = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}

    UpperCAmelCase_ : Any = idalabel
    UpperCAmelCase_ : List[str] = {v: k for k, v in idalabel.items()}
    return config


def lowerCamelCase_ ( ) -> Optional[Any]:
    UpperCAmelCase_ : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    UpperCAmelCase_ : List[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__, stream=SCREAMING_SNAKE_CASE__ ).raw )
    return im


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
    UpperCAmelCase_ : Optional[Any] = CONFIG_MAP[model_name]['''image_size''']
    UpperCAmelCase_ : str = EfficientNetImageProcessor(
        size={'''height''': size, '''width''': size},
        image_mean=[0.4_85, 0.4_56, 0.4_06],
        image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63],
        do_center_crop=SCREAMING_SNAKE_CASE__,
    )
    return preprocessor


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
    UpperCAmelCase_ : Dict = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
    UpperCAmelCase_ : Optional[int] = sorted(set(SCREAMING_SNAKE_CASE__ ) )
    UpperCAmelCase_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : str = {b: str(SCREAMING_SNAKE_CASE__ ) for b, i in zip(SCREAMING_SNAKE_CASE__, range(SCREAMING_SNAKE_CASE__ ) )}

    UpperCAmelCase_ : Optional[int] = []
    rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
    rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
    rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
    rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
    rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )

    for b in block_names:
        UpperCAmelCase_ : Union[str, Any] = block_name_mapping[b]
        rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
        rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
        rename_keys.append(
            (F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
        rename_keys.append(
            (F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
        rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
        rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
        rename_keys.append(
            (F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
        rename_keys.append(
            (F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
        rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
        rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
        rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
        rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
        rename_keys.append(
            (F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
        rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
        rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
        rename_keys.append(
            (F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
        rename_keys.append(
            (F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )

    rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
    rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
    rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
    rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
    rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )

    UpperCAmelCase_ : Optional[int] = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            UpperCAmelCase_ : Optional[int] = '''efficientnet.''' + item[1]

    UpperCAmelCase_ : Any = '''classifier.weight'''
    UpperCAmelCase_ : Dict = '''classifier.bias'''
    return key_mapping


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[int]:
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        UpperCAmelCase_ : List[str] = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            UpperCAmelCase_ : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(3, 2, 0, 1 )
        elif "depthwise_kernel" in key:
            UpperCAmelCase_ : Tuple = torch.from_numpy(SCREAMING_SNAKE_CASE__ ).permute(2, 3, 0, 1 )
        elif "kernel" in key:
            UpperCAmelCase_ : str = torch.from_numpy(np.transpose(SCREAMING_SNAKE_CASE__ ) )
        else:
            UpperCAmelCase_ : Tuple = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(SCREAMING_SNAKE_CASE__ )


@torch.no_grad()
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
    UpperCAmelCase_ : Optional[Any] = model_classes[model_name](
        include_top=SCREAMING_SNAKE_CASE__,
        weights='''imagenet''',
        input_tensor=SCREAMING_SNAKE_CASE__,
        input_shape=SCREAMING_SNAKE_CASE__,
        pooling=SCREAMING_SNAKE_CASE__,
        classes=1000,
        classifier_activation='''softmax''',
    )

    UpperCAmelCase_ : int = original_model.trainable_variables
    UpperCAmelCase_ : List[Any] = original_model.non_trainable_variables
    UpperCAmelCase_ : Tuple = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        UpperCAmelCase_ : Dict = param.numpy()
    UpperCAmelCase_ : Dict = list(tf_params.keys() )

    # Load HuggingFace model
    UpperCAmelCase_ : str = get_efficientnet_config(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : Optional[int] = EfficientNetForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
    UpperCAmelCase_ : Optional[int] = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print('''Converting parameters...''' )
    UpperCAmelCase_ : List[Any] = rename_keys(SCREAMING_SNAKE_CASE__ )
    replace_params(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )

    # Initialize preprocessor and preprocess input image
    UpperCAmelCase_ : Optional[Any] = convert_image_processor(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : Union[str, Any] = preprocessor(images=prepare_img(), return_tensors='''pt''' )

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        UpperCAmelCase_ : str = hf_model(**SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : Dict = outputs.logits.detach().numpy()

    # Original model inference
    UpperCAmelCase_ : List[str] = False
    UpperCAmelCase_ : Union[str, Any] = CONFIG_MAP[model_name]['''image_size''']
    UpperCAmelCase_ : str = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST )
    UpperCAmelCase_ : Union[str, Any] = image.img_to_array(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : Optional[Any] = np.expand_dims(SCREAMING_SNAKE_CASE__, axis=0 )
    UpperCAmelCase_ : Optional[int] = original_model.predict(SCREAMING_SNAKE_CASE__ )

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, atol=1E-3 ), "The predicted logits are not the same."
    print('''Model outputs match!''' )

    if save_model:
        # Create folder to save model
        if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
            os.mkdir(SCREAMING_SNAKE_CASE__ )
        # Save converted model and image processor
        hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
        preprocessor.save_pretrained(SCREAMING_SNAKE_CASE__ )

    if push_to_hub:
        # Push model and image processor to hub
        print(F"""Pushing converted {model_name} to the hub...""" )
        UpperCAmelCase_ : List[str] = F"""efficientnet-{model_name}"""
        preprocessor.push_to_hub(SCREAMING_SNAKE_CASE__ )
        hf_model.push_to_hub(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    snake_case_ : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    snake_case_ : List[Any] = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
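
# ---------------------------------------------------------------------------
# Hedged usage sketch (added). Assuming this script is saved as
# convert_efficientnet_to_pytorch.py (hypothetical filename), a typical
# invocation would be:
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model
#
# The assert near the end only passes when the TF and PyTorch logits agree to
# atol=1e-3 on the sample COCO image.
# ---------------------------------------------------------------------------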
644
'''simple docstring'''

def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str:
    if number > 0:
        raise ValueError('''input must be a negative integer''' )
    UpperCAmelCase_ : Union[str, Any] = len(bin(SCREAMING_SNAKE_CASE__ )[3:] )
    UpperCAmelCase_ : Union[str, Any] = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:]
    UpperCAmelCase_ : Optional[Any] = (
        ('''1''' + '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ )) + twos_complement_number)
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
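
# ---------------------------------------------------------------------------
# Hedged usage sketch (added; `lowerCamelCase_` is the dataset's mask for the
# converter defined above). The expected values follow from the padding logic:
#
#     lowerCamelCase_(0)    # -> "0b0"
#     lowerCamelCase_(-1)   # -> "0b11"    (two's complement of -1 in 2 bits)
#     lowerCamelCase_(-5)   # -> "0b1011"  (|-5| needs 3 bits, plus a sign bit)
#     lowerCamelCase_(5)    # raises ValueError("input must be a negative integer")
# ---------------------------------------------------------------------------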
644
1
'''simple docstring'''
from __future__ import annotations

import time
from collections.abc import Sequence
from random import randint

from matplotlib import pyplot as plt


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Sequence[float], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> tuple[int | None, int | None, float]:
    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]

    UpperCAmelCase_ : Optional[Any] = (low + high) // 2
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = max_subarray(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = max_subarray(SCREAMING_SNAKE_CASE__, mid + 1, SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = max_cross_sum(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Sequence[float], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> tuple[int, int, float]:
    UpperCAmelCase_ , UpperCAmelCase_ : Dict = float('''-inf''' ), -1
    UpperCAmelCase_ , UpperCAmelCase_ : Any = float('''-inf''' ), -1
    UpperCAmelCase_ : int | float = 0
    for i in range(SCREAMING_SNAKE_CASE__, low - 1, -1 ):
        summ += arr[i]
        if summ > left_sum:
            UpperCAmelCase_ : int = summ
            UpperCAmelCase_ : Dict = i
    UpperCAmelCase_ : int = 0
    for i in range(mid + 1, high + 1 ):
        summ += arr[i]
        if summ > right_sum:
            UpperCAmelCase_ : List[str] = summ
            UpperCAmelCase_ : Optional[int] = i
    return max_left, max_right, (left_sum + right_sum)


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> float:
    UpperCAmelCase_ : List[Any] = [randint(1, SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
    UpperCAmelCase_ : int = time.time()
    max_subarray(SCREAMING_SNAKE_CASE__, 0, input_size - 1 )
    UpperCAmelCase_ : Union[str, Any] = time.time()
    return end - start


def lowerCamelCase_ ( ) -> None:
    UpperCAmelCase_ : Optional[int] = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    UpperCAmelCase_ : str = [time_max_subarray(SCREAMING_SNAKE_CASE__ ) for input_size in input_sizes]
    print('''No of Inputs\t\tTime Taken''' )
    for input_size, runtime in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
        print(SCREAMING_SNAKE_CASE__, '''\t\t''', SCREAMING_SNAKE_CASE__ )
    plt.plot(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
    plt.xlabel('''Number of Inputs''' )
    plt.ylabel('''Time taken in seconds''' )
    plt.show()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
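
# ---------------------------------------------------------------------------
# Hedged usage sketch (added). The recursive calls above refer to the original
# (unmasked) name `max_subarray`; with that name restored, the classic example
#
#     max_subarray([-2, 1, -3, 4, -1, 2, 1, -5, 4], 0, 8)
#
# returns (3, 6, 6): the maximum-sum slice is arr[3:7] == [4, -1, 2, 1].
# ---------------------------------------------------------------------------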
644
'''simple docstring'''
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class __a (unittest.TestCase ):
    def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
        """simple docstring"""
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
        UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
        self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) )

        with self.assertRaises(__magic_name__ ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )

        with self.assertRaises(__magic_name__ ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def UpperCAmelCase__ ( self : List[str] ) -> Dict:
        """simple docstring"""
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(__magic_name__ ):
            DisjunctiveConstraint(__magic_name__ )  # fails here

    def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
        UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
        UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False
        self.assertTrue(__magic_name__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 )
        UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False
        self.assertTrue(__magic_name__ )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 )
        UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False
        self.assertTrue(__magic_name__ )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def UpperCAmelCase__ ( self : int ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )

        dc.reset()

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )

        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
644
1
'''simple docstring'''

def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 3, SCREAMING_SNAKE_CASE__ : int = 7, SCREAMING_SNAKE_CASE__ : int = 1000000 ) -> int:
    UpperCAmelCase_ : int = 0
    UpperCAmelCase_ : Tuple = 1

    for current_denominator in range(1, limit + 1 ):
        UpperCAmelCase_ : Optional[int] = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            UpperCAmelCase_ : int = current_numerator
            UpperCAmelCase_ : Optional[int] = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_00_00_00))
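
# ---------------------------------------------------------------------------
# Hedged worked example (added). The __main__ block above calls the original
# (unmasked) name `solution`. For a small limit the search is easy to follow:
# with numerator/denominator = 3/7 and limit = 8, the largest fraction strictly
# left of 3/7 is 2/5, so solution(3, 7, 8) returns 2. With limit = 1_000_000
# this is Project Euler problem 71, whose answer is 428570.
# ---------------------------------------------------------------------------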
644
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None)
    snake_case_ : Optional[Any] = df.shape[:1][0]
    # If you're using some other dataset input the target column
    snake_case_ : Any = df.iloc[:, 1:2]
    snake_case_ : str = actual_data.values.reshape(len_data, 1)
    snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data)
    snake_case_ : List[str] = 10
    snake_case_ : Any = 5
    snake_case_ : Any = 20
    snake_case_ : Tuple = len_data - periods * look_back
    snake_case_ : str = actual_data[:division]
    snake_case_ : Optional[int] = actual_data[division - look_back :]
    snake_case_ ,snake_case_ : Any = [], []
    snake_case_ ,snake_case_ : Union[str, Any] = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    snake_case_ : Any = np.array(train_x)
    snake_case_ : Optional[Any] = np.array(test_x)
    snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y])
    snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y])

    snake_case_ : List[Any] = Sequential()
    model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(1_28, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    snake_case_ : Dict = model.fit(
        x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
    )
    snake_case_ : Optional[Any] = model.predict(x_test)
644
1
'''simple docstring'''
import numpy as np


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Any ) -> Optional[Any]:
    UpperCAmelCase_ : str = int(np.ceil((x_end - xa) / h ) )
    UpperCAmelCase_ : str = np.zeros((n + 1,) )
    UpperCAmelCase_ : Tuple = ya
    UpperCAmelCase_ : Optional[int] = xa

    for k in range(SCREAMING_SNAKE_CASE__ ):
        UpperCAmelCase_ : int = f(SCREAMING_SNAKE_CASE__, y[k] )
        UpperCAmelCase_ : Optional[int] = f(x + 0.5 * h, y[k] + 0.5 * h * ka )
        UpperCAmelCase_ : Optional[Any] = f(x + 0.5 * h, y[k] + 0.5 * h * ka )
        UpperCAmelCase_ : int = f(x + h, y[k] + h * ka )
        UpperCAmelCase_ : str = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
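
# ---------------------------------------------------------------------------
# Hedged usage sketch (added). Assuming the original (unmasked) signature
# rk4(f, y0, x0, x_end, h) suggested by the body above:
#
#     y = rk4(lambda x, y: y, 1.0, 0.0, 1.0, 0.1)   # solve y' = y, y(0) = 1
#     y[-1]                                         # ~2.71828, vs. exp(1)
#
# The classical Runge-Kutta step has local error O(h**5) and global error
# O(h**4), which is why ten steps of h = 0.1 already match e to about six
# significant digits.
# ---------------------------------------------------------------------------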
644
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPipeline,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker


snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1"
snake_case_ : Dict = "CompVis/stable-diffusion-v1-2"
snake_case_ : Any = "CompVis/stable-diffusion-v1-3"
snake_case_ : str = "CompVis/stable-diffusion-v1-4"


class __a (lowerCamelCase ):
    def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str:
        """simple docstring"""
        super()._init_()
        UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ )
        UpperCAmelCase_ : Tuple = StableDiffusionPipeline( vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , )
        self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )

    @property
    def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]:
        """simple docstring"""
        return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )}

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int:
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__magic_name__ )

    def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
        """simple docstring"""
        self.enable_attention_slicing(__magic_name__ )

    @torch.no_grad()
    def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
        """simple docstring"""
        return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )

    @torch.no_grad()
    def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any:
        """simple docstring"""
        return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )

    @torch.no_grad()
    def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]:
        """simple docstring"""
        return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )

    @torch.no_grad()
    def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str:
        """simple docstring"""
        return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )

    @torch.no_grad()
    def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(__magic_name__ )

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )

        # Get first result from Stable Diffusion Checkpoint v1.1
        UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )

        # Get first result from Stable Diffusion Checkpoint v1.2
        UpperCAmelCase_ : int = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )

        # Get first result from Stable Diffusion Checkpoint v1.3
        UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )

        # Get first result from Stable Diffusion Checkpoint v1.4
        UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
644
1
'''simple docstring'''

def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict:
    UpperCAmelCase_ : Union[str, Any] = len(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : List[Any] = sum(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : List[Any] = [[False for x in range(s + 1 )] for y in range(n + 1 )]

    for i in range(1, n + 1 ):
        UpperCAmelCase_ : Tuple = True
    for i in range(1, s + 1 ):
        UpperCAmelCase_ : str = False

    for i in range(1, n + 1 ):
        for j in range(1, s + 1 ):
            UpperCAmelCase_ : Any = dp[i][j - 1]
            if arr[i - 1] <= j:
                UpperCAmelCase_ : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2 ), -1, -1 ):
        if dp[n][j] is True:
            UpperCAmelCase_ : Dict = s - 2 * j
            break

    return diff
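
# ---------------------------------------------------------------------------
# Hedged worked example (added; `lowerCamelCase_` is the dataset's mask for the
# minimum-partition function above):
#
#     lowerCamelCase_([1, 6, 11, 5])   # -> 1
#
# sum = 23; the best reachable subset sum not exceeding 23 // 2 is 11
# (e.g. {11} or {6, 5}), so the minimum difference is 23 - 2 * 11 = 1.
# ---------------------------------------------------------------------------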
644
'''simple docstring'''
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


snake_case_ : Optional[int] = 16
snake_case_ : Tuple = 32


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict:
    UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' )

    def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ):
        # max_length=None => use the model max length (it's actually the default)
        UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    UpperCAmelCase_ : Tuple = datasets.map(
        SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' )

    def collate_fn(SCREAMING_SNAKE_CASE__ : str ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' )
        return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' )

    # Instantiate dataloaders.
    UpperCAmelCase_ : str = DataLoader(
        tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : int = DataLoader(
        tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ )

    return train_dataloader, eval_dataloader


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any:
    model.eval()
    UpperCAmelCase_ : List[str] = 0

    for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device )
        with torch.no_grad():
            UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ )
        UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 )
        # It is slightly faster to call this once, than multiple times
        UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather((predictions, batch['''labels''']) )
        # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(SCREAMING_SNAKE_CASE__ ) - 1:
                UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, )

    UpperCAmelCase_ : List[str] = metric.compute()
    return eval_metric["accuracy"]


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple:
    # Initialize accelerator
    UpperCAmelCase_ : Union[str, Any] = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    UpperCAmelCase_ : int = config['''lr''']
    UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] )
    UpperCAmelCase_ : Optional[int] = int(config['''seed'''] )
    UpperCAmelCase_ : List[str] = int(config['''batch_size'''] )
    UpperCAmelCase_ : Optional[int] = args.model_name_or_path

    set_seed(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ )

    # Instantiate optimizer
    UpperCAmelCase_ : str = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ )

    if accelerator.state.deepspeed_plugin is not None:
        UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[
            '''gradient_accumulation_steps'''
        ]
    else:
        UpperCAmelCase_ : Tuple = 1
    UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup(
            optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, )
    else:
        UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare(
        SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )

    # We need to keep track of how many total steps we have iterated over
    UpperCAmelCase_ : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
    UpperCAmelCase_ : Dict = 0

    UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' )
    UpperCAmelCase_ : Optional[Any] = num_epochs
    if args.partial_train_epoch is not None:
        UpperCAmelCase_ : List[Any] = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint )
        UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1]
        UpperCAmelCase_ : int = ''''''
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1
        UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
        accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ )
        accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''] )
        with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f:
            UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ )
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    UpperCAmelCase_ : int = {}
    for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
        model.train()
        for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ):
            UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ )
            UpperCAmelCase_ : Any = outputs.loss
            UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps
            accelerator.backward(SCREAMING_SNAKE_CASE__ )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        UpperCAmelCase_ : Tuple = F"""epoch_{epoch}"""
        UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ )
        accelerator.save_state(SCREAMING_SNAKE_CASE__ )
        UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
        UpperCAmelCase_ : Optional[Any] = accuracy
        UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0]
        UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr''']
        UpperCAmelCase_ : Tuple = epoch
        UpperCAmelCase_ : Dict = overall_step
        accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ )

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f:
                json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )


def lowerCamelCase_ ( ) -> List[str]:
    UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
    parser.add_argument(
        '''--model_name_or_path''',
        type=SCREAMING_SNAKE_CASE__,
        default='''bert-base-cased''',
        help='''Path to pretrained model or model identifier from huggingface.co/models.''',
        required=SCREAMING_SNAKE_CASE__, )
    parser.add_argument(
        '''--output_dir''',
        type=SCREAMING_SNAKE_CASE__,
        default='''.''',
        help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', )
    parser.add_argument(
        '''--resume_from_checkpoint''',
        type=SCREAMING_SNAKE_CASE__,
        default=SCREAMING_SNAKE_CASE__,
        help='''If the training should continue from a checkpoint folder.''', )
    parser.add_argument(
        '''--partial_train_epoch''',
        type=SCREAMING_SNAKE_CASE__,
        default=SCREAMING_SNAKE_CASE__,
        help='''If passed, the training will stop after this number of epochs.''', )
    parser.add_argument(
        '''--num_epochs''',
        type=SCREAMING_SNAKE_CASE__,
        default=2,
        help='''Number of train epochs.''', )
    UpperCAmelCase_ : Optional[int] = parser.parse_args()
    UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
    training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    main()
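
# ---------------------------------------------------------------------------
# Hedged usage sketch (added). Assuming this script is saved as
# test_checkpointing.py (hypothetical filename), a typical run and resume look
# like:
#
#     accelerate launch test_checkpointing.py --num_epochs 2 --output_dir ckpts
#     accelerate launch test_checkpointing.py --output_dir ckpts \
#         --resume_from_checkpoint ckpts/epoch_0
#
# On resume, the asserts above check that accuracy, the scheduler lr and the
# optimizer lr all match the values recorded in state_0.json.
# ---------------------------------------------------------------------------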
644
1
'''simple docstring'''

def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
    return abs(SCREAMING_SNAKE_CASE__ ) if a == 0 else greatest_common_divisor(b % a, SCREAMING_SNAKE_CASE__ )


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = y, x % y
    return abs(SCREAMING_SNAKE_CASE__ )


def lowerCamelCase_ ( ) -> Optional[int]:
    try:
        UpperCAmelCase_ : Optional[Any] = input('''Enter two integers separated by comma (,): ''' ).split(''',''' )
        UpperCAmelCase_ : Optional[int] = int(nums[0] )
        UpperCAmelCase_ : List[Any] = int(nums[1] )
        print(
            F"""greatest_common_divisor({num_a}, {num_a}) = """
            F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
        print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" )
    except (IndexError, UnboundLocalError, ValueError):
        print('''Wrong input''' )


if __name__ == "__main__":
    main()
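
# ---------------------------------------------------------------------------
# Hedged worked example (added). main() above calls the original (unmasked)
# names `greatest_common_divisor` and `gcd_by_iterative`; with those names
# restored, both compute the same result via Euclid's algorithm:
#
#     greatest_common_divisor(24, 40)   # -> 8
#     gcd_by_iterative(24, 40)          # -> 8
# ---------------------------------------------------------------------------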
644
'''simple docstring'''

def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]:
    UpperCAmelCase_ : int = []
    if len(SCREAMING_SNAKE_CASE__ ) == 1:
        return [nums.copy()]
    for _ in range(len(SCREAMING_SNAKE_CASE__ ) ):
        UpperCAmelCase_ : List[Any] = nums.pop(0 )
        UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ )
        for perm in permutations:
            perm.append(SCREAMING_SNAKE_CASE__ )
        result.extend(SCREAMING_SNAKE_CASE__ )
        nums.append(SCREAMING_SNAKE_CASE__ )
    return result


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any:
    def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
        if start == len(SCREAMING_SNAKE_CASE__ ) - 1:
            output.append(nums[:] )
        else:
            for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ):
                UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start]
                backtrack(start + 1 )
                UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start]  # backtrack

    UpperCAmelCase_ : Optional[int] = []
    backtrack(0 )
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    snake_case_ : Tuple = permutea([1, 2, 3])
    print(res)
    doctest.testmod()
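
# ---------------------------------------------------------------------------
# Hedged worked example (added). The __main__ block above calls the original
# (unmasked) name `permutea`; the swap-based backtracking yields
#
#     permutea([1, 2, 3])
#     # -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
#
# i.e. all 3! = 6 orderings, generated by fixing one element per recursion
# level and swapping it into the front position.
# ---------------------------------------------------------------------------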
644
1
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

snake_case_ : Optional[Any] = logging.get_logger(__name__)

snake_case_ : str = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

snake_case_ : Any = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

snake_case_ : List[Any] = {"facebook/blenderbot-3B": 1_28}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase_ ( ) -> List[Any]:
    UpperCAmelCase_ : Any = (
        list(range(ord('''!''' ), ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ), ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ), ord('''ÿ''' ) + 1 ) )
    )
    UpperCAmelCase_ : Optional[int] = bs[:]
    UpperCAmelCase_ : List[Any] = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(SCREAMING_SNAKE_CASE__ )
            cs.append(2**8 + n )
            n += 1
    UpperCAmelCase_ : Dict = [chr(SCREAMING_SNAKE_CASE__ ) for n in cs]
    return dict(zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) )


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> str:
    UpperCAmelCase_ : List[Any] = set()
    UpperCAmelCase_ : Any = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        UpperCAmelCase_ : Optional[int] = char
    return pairs


class __a (lowerCamelCase ):
    __a : List[Any] = VOCAB_FILES_NAMES
    __a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    __a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a : List[Any] = ["input_ids", "attention_mask"]

    def __init__( self : int , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : str="replace" , __magic_name__ : Optional[Any]="<s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : Any="</s>" , __magic_name__ : List[str]="<s>" , __magic_name__ : Union[str, Any]="<unk>" , __magic_name__ : List[str]="<pad>" , __magic_name__ : str="<mask>" , __magic_name__ : Any=False , **__magic_name__ : Tuple , ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token
        UpperCAmelCase_ : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
        UpperCAmelCase_ : str = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token
        UpperCAmelCase_ : Tuple = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token
        UpperCAmelCase_ : Optional[int] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
        UpperCAmelCase_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token

        super().__init__( errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , **__magic_name__ , )

        with open(__magic_name__ , encoding='''utf-8''' ) as vocab_handle:
            UpperCAmelCase_ : Dict = json.load(__magic_name__ )
        UpperCAmelCase_ : List[Any] = {v: k for k, v in self.encoder.items()}
        UpperCAmelCase_ : Any = errors  # how to handle errors in decoding
        UpperCAmelCase_ : Optional[int] = bytes_to_unicode()
        UpperCAmelCase_ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
        with open(__magic_name__ , encoding='''utf-8''' ) as merges_handle:
            UpperCAmelCase_ : str = merges_handle.read().split('''\n''' )[1:-1]
        UpperCAmelCase_ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
        UpperCAmelCase_ : Tuple = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
        UpperCAmelCase_ : List[str] = {}
        UpperCAmelCase_ : Union[str, Any] = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        UpperCAmelCase_ : Any = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
        """simple docstring"""
        return len(self.encoder )

    def UpperCAmelCase__ ( self : Dict ) -> Dict:
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )

    def UpperCAmelCase__ ( self : int , __magic_name__ : List[str] ) -> str:
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        UpperCAmelCase_ : List[Any] = tuple(__magic_name__ )
        UpperCAmelCase_ : int = get_pairs(__magic_name__ )

        if not pairs:
            return token

        while True:
            UpperCAmelCase_ : Any = min(__magic_name__ , key=lambda __magic_name__ : self.bpe_ranks.get(__magic_name__ , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = bigram
            UpperCAmelCase_ : int = []
            UpperCAmelCase_ : List[str] = 0
            while i < len(__magic_name__ ):
                try:
                    UpperCAmelCase_ : Any = word.index(__magic_name__ , __magic_name__ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCAmelCase_ : Optional[int] = j

                if word[i] == first and i < len(__magic_name__ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCAmelCase_ : Union[str, Any] = tuple(__magic_name__ )
            UpperCAmelCase_ : List[Any] = new_word
            if len(__magic_name__ ) == 1:
                break
            else:
                UpperCAmelCase_ : Any = get_pairs(__magic_name__ )
        UpperCAmelCase_ : Optional[int] = ''' '''.join(__magic_name__ )
        UpperCAmelCase_ : Union[str, Any] = word
        return word

    def UpperCAmelCase__ ( self : int , __magic_name__ : List[Any] ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Any = []
        for token in re.findall(self.pat , __magic_name__ ):
            UpperCAmelCase_ : Tuple = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__magic_name__ ).split(''' ''' ) )
        return bpe_tokens

    def UpperCAmelCase__ ( self : str , __magic_name__ : str ) -> str:
        """simple docstring"""
        return self.encoder.get(__magic_name__ , self.encoder.get(self.unk_token ) )

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[Any] ) -> List[Any]:
        """simple docstring"""
        return self.decoder.get(__magic_name__ )

    def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple ) -> str:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ )
        UpperCAmelCase_ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text

    def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(__magic_name__ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase_ : List[Any] = os.path.join(
            __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase_ : Tuple = os.path.join(
            __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )

        with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + '''\n''' )

        UpperCAmelCase_ : int = 0
        with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    UpperCAmelCase_ : Optional[int] = token_index
                writer.write(''' '''.join(__magic_name__ ) + '''\n''' )
                index += 1

        return vocab_file, merge_file

    def UpperCAmelCase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
        if token_ids_a is None:
            return [1] + ([0] * len(__magic_name__ )) + [1]
        return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1]

    def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        UpperCAmelCase_ : Any = [self.sep_token_id]
        UpperCAmelCase_ : Optional[Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any] , __magic_name__ : str=False , **__magic_name__ : Optional[Any] ) -> int:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__magic_name__ ) > 0 and not text[0].isspace()):
            UpperCAmelCase_ : List[str] = ''' ''' + text
        return (text, kwargs)

    def UpperCAmelCase__ ( self : Tuple , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[Any]:
        """simple docstring"""
        return token_ids_a + [self.eos_token_id]

    def UpperCAmelCase__ ( self : Dict ,
__magic_name__ : "Conversation" ) -> List[int]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(''' ''' + text ) else: # Generated responses should contain them already. inputs.append(__magic_name__ ) UpperCAmelCase_ : Optional[int] = ''' '''.join(__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = self.encode(__magic_name__ ) if len(__magic_name__ ) > self.model_max_length: UpperCAmelCase_ : Tuple = input_ids[-self.model_max_length :] logger.warning(F"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
644
class MaxFenwickTree:
    """
    Fenwick (binary indexed) tree supporting point updates and
    range-maximum queries over the half-open interval [left, right).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # current values
        self.tree = [0] * size  # tree[i] = max of arr over [get_prev(i) + 1, i]

    @staticmethod
    def get_next(index: int) -> int:
        """Index of the next node whose range covers this one, in O(1)."""
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        """Index just before the left border of this node's range, in O(1)."""
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set arr[index] = value in O(log^2 N)."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers only itself.
                self.tree[index] = value
            else:
                # Rebuild this node's maximum over [current_left_border, index]
                # from arr[index] and the child blocks below it, so that
                # decreasing updates are handled correctly as well.
                best = self.arr[index]
                j = index - 1
                while j >= current_left_border:
                    best = max(best, self.tree[j])
                    j = self.get_prev(j)
                self.tree[index] = best
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return max(arr[left:right]) in O(log^2 N)."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # The whole block [current_left + 1, right] lies in the query.
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
644
1
def binary_and(a: int, b: int) -> str:
    """
    Return the bitwise AND of two non-negative integers as a binary string.

    >>> binary_and(25, 32)
    '0b000000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
644
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str: """simple docstring""" UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Any = use_input_mask UpperCAmelCase_ : List[str] = use_token_type_ids UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : Tuple = scope def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : str = None if self.use_token_type_ids: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: 
"""simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # create attention mask UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) UpperCAmelCase_ : Any = self.seq_length // 2 UpperCAmelCase_ : Tuple = 0 # first forward pass UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1 UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCAmelCase_ : str = random_other_next_tokens # append to next input_ids and attn_mask UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : int = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , ) # get two different outputs UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] # select random slice UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , 
output_from_past.shape[-1] ).item() UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach() UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval() UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) # first forward pass UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[ '''last_hidden_state''' ] # select random slice UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ ) model.to(__magic_name__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str: """simple docstring""" UpperCAmelCase_ : int = BioGptModel(__magic_name__ ) UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 ) def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]: 
"""simple docstring""" UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = config_and_inputs UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : str = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else () __a : Union[str, Any] = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) __a : List[str] = False def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = BioGptModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : str = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ ) @slow def UpperCAmelCase__ ( 
self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : Tuple = '''left''' # Define PAD Token = EOS Token = 50256 UpperCAmelCase_ : List[Any] = tokenizer.eos_token UpperCAmelCase_ : List[Any] = model.config.eos_token_id # use different length sentences to test batching UpperCAmelCase_ : Tuple = [ '''Hello, my dog is a little''', '''Today, I''', ] UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ ) UpperCAmelCase_ : Any = model.generate( input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , ) UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ ) UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item() UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings ) UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = [ '''Hello, my dog is a little bit bigger than a little bit.''', '''Today, I have a good idea of how to use the information''', ] self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : Tuple = input_dict['''input_ids'''] UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = 3 UpperCAmelCase_ : Optional[int] = '''multi_label_classification''' UpperCAmelCase_ : int = input_dict['''input_ids'''] UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCAmelCase_ : str = model(__magic_name__ )[0] UpperCAmelCase_ : Optional[int] = 4_23_84 UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __magic_name__ ) UpperCAmelCase_ : List[Any] = torch.tensor( [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ ) UpperCAmelCase_ : Optional[int] = model.generate( **__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , ) UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ( '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the''' ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and''' ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),''' ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and''' ''' more than 800,000 deaths.''' ) self.assertEqual(__magic_name__ , __magic_name__ )
644
1
'''simple docstring''' import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets snake_case_ : Optional[int] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n" snake_case_ : int = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n" snake_case_ : str = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n" def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any ) -> Tuple: def remove_articles(SCREAMING_SNAKE_CASE__ : Optional[int] ): UpperCAmelCase_ : Any = re.compile(R'''\b(a|an|the)\b''', re.UNICODE ) return re.sub(SCREAMING_SNAKE_CASE__, ''' ''', SCREAMING_SNAKE_CASE__ ) def white_space_fix(SCREAMING_SNAKE_CASE__ : str ): return " ".join(text.split() ) def remove_punc(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): UpperCAmelCase_ : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(SCREAMING_SNAKE_CASE__ : List[str] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(SCREAMING_SNAKE_CASE__ ) ) ) ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[int]: return int(normalize_answer(SCREAMING_SNAKE_CASE__ ) == normalize_answer(SCREAMING_SNAKE_CASE__ ) ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[Any] ) -> int: UpperCAmelCase_ : Tuple = [any(compute_exact(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) for ref in refs ) for pred, refs in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )] return (sum(SCREAMING_SNAKE_CASE__ ) / len(SCREAMING_SNAKE_CASE__ )) * 100 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[int], 
SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams] UpperCAmelCase_ : Any = Counter(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = Counter(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = Counter() for sgram, scount in sgramcounter.items(): UpperCAmelCase_ : Optional[Any] = scount * numref UpperCAmelCase_ : List[str] = Counter(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Union[str, Any] = Counter() for cgram, ccount in cgramcounter.items(): UpperCAmelCase_ : Any = ccount * numref # KEEP UpperCAmelCase_ : List[str] = sgramcounter_rep & cgramcounter_rep UpperCAmelCase_ : Tuple = keepgramcounter_rep & rgramcounter UpperCAmelCase_ : Any = sgramcounter_rep & rgramcounter UpperCAmelCase_ : Any = 0 UpperCAmelCase_ : Tuple = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : Optional[int] = 1 if len(SCREAMING_SNAKE_CASE__ ) > 0: UpperCAmelCase_ : Optional[Any] = keeptmpscorea / len(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) UpperCAmelCase_ : List[Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) UpperCAmelCase_ : List[Any] = 0 if keepscore_precision > 0 or keepscore_recall > 0: UpperCAmelCase_ : List[Any] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION UpperCAmelCase_ : List[Any] = sgramcounter_rep - cgramcounter_rep UpperCAmelCase_ : Optional[Any] = delgramcounter_rep - rgramcounter UpperCAmelCase_ : List[str] = sgramcounter_rep - rgramcounter UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : Tuple = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCAmelCase_ : Union[str, Any] = 1 if len(SCREAMING_SNAKE_CASE__ ) > 0: UpperCAmelCase_ : Union[str, Any] = deltmpscorea / len(SCREAMING_SNAKE_CASE__ ) # ADDITION UpperCAmelCase_ : str = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = set(SCREAMING_SNAKE_CASE__ ) & set(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[int] = set(SCREAMING_SNAKE_CASE__ ) - set(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
UpperCAmelCase_ : Dict = 1 UpperCAmelCase_ : Any = 1 if len(SCREAMING_SNAKE_CASE__ ) > 0: UpperCAmelCase_ : str = addtmpscore / len(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > 0: UpperCAmelCase_ : Optional[Any] = addtmpscore / len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = 0 if addscore_precision > 0 or addscore_recall > 0: UpperCAmelCase_ : Any = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Dict: UpperCAmelCase_ : Optional[int] = len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = ssent.split(''' ''' ) UpperCAmelCase_ : Tuple = csent.split(''' ''' ) UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Union[str, Any] = [] for rsent in rsents: UpperCAmelCase_ : List[Any] = rsent.split(''' ''' ) UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Union[str, Any] = [] UpperCAmelCase_ : Optional[int] = [] ragramslist.append(SCREAMING_SNAKE_CASE__ ) for i in range(0, len(SCREAMING_SNAKE_CASE__ ) - 1 ): if i < len(SCREAMING_SNAKE_CASE__ ) - 1: UpperCAmelCase_ : List[str] = ragrams[i] + ''' ''' + ragrams[i + 1] ragrams.append(SCREAMING_SNAKE_CASE__ ) if i < len(SCREAMING_SNAKE_CASE__ ) - 2: UpperCAmelCase_ : Any = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] ragrams.append(SCREAMING_SNAKE_CASE__ ) if i < len(SCREAMING_SNAKE_CASE__ ) - 3: UpperCAmelCase_ : Dict = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3] ragrams.append(SCREAMING_SNAKE_CASE__ ) ragramslist.append(SCREAMING_SNAKE_CASE__ ) ragramslist.append(SCREAMING_SNAKE_CASE__ ) ragramslist.append(SCREAMING_SNAKE_CASE__ ) for i in range(0, len(SCREAMING_SNAKE_CASE__ ) - 1 ): if i < len(SCREAMING_SNAKE_CASE__ ) - 1: UpperCAmelCase_ : Optional[Any] = sagrams[i] + ''' ''' + sagrams[i + 1] sagrams.append(SCREAMING_SNAKE_CASE__ ) if i < len(SCREAMING_SNAKE_CASE__ ) - 2: UpperCAmelCase_ : Optional[int] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] sagrams.append(SCREAMING_SNAKE_CASE__ ) if i < len(SCREAMING_SNAKE_CASE__ ) - 3: UpperCAmelCase_ : Union[str, Any] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3] sagrams.append(SCREAMING_SNAKE_CASE__ ) for i in range(0, len(SCREAMING_SNAKE_CASE__ ) - 1 ): if i < len(SCREAMING_SNAKE_CASE__ ) - 1: UpperCAmelCase_ : Any = cagrams[i] + ''' ''' + cagrams[i + 1] cagrams.append(SCREAMING_SNAKE_CASE__ ) if i < len(SCREAMING_SNAKE_CASE__ ) - 2: UpperCAmelCase_ : int = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] cagrams.append(SCREAMING_SNAKE_CASE__ ) if i < len(SCREAMING_SNAKE_CASE__ ) - 3: UpperCAmelCase_ : str = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3] cagrams.append(SCREAMING_SNAKE_CASE__ ) ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Dict = SARIngram(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Tuple = SARIngram(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, 
SCREAMING_SNAKE_CASE__ ) ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : Any = SARIngram(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) ((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) : List[str] = SARIngram(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 UpperCAmelCase_ : Optional[int] = sum([delascore, delascore, delascore, delascore] ) / 4 UpperCAmelCase_ : List[str] = sum([addascore, addascore, addascore, addascore] ) / 4 UpperCAmelCase_ : Dict = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : bool = True, SCREAMING_SNAKE_CASE__ : str = "13a", SCREAMING_SNAKE_CASE__ : bool = True ) -> Optional[Any]: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: UpperCAmelCase_ : Dict = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: UpperCAmelCase_ : Optional[Any] = sacrebleu.metrics.bleu._get_tokenizer(SCREAMING_SNAKE_CASE__ )()(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase_ : Union[str, Any] = sacrebleu.TOKENIZERS[tokenizer]()(SCREAMING_SNAKE_CASE__ ) elif tokenizer == "moses": UpperCAmelCase_ : Optional[int] = sacremoses.MosesTokenizer().tokenize(SCREAMING_SNAKE_CASE__, return_str=SCREAMING_SNAKE_CASE__, escape=SCREAMING_SNAKE_CASE__ ) elif tokenizer == "penn": UpperCAmelCase_ : int = sacremoses.MosesTokenizer().penn_tokenize(SCREAMING_SNAKE_CASE__, return_str=SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase_ : List[Any] = sentence if not return_str: UpperCAmelCase_ : Any = normalized_sent.split() return normalized_sent def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict: if not (len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )): raise ValueError('''Sources length must match predictions and references lengths.''' ) UpperCAmelCase_ : Union[str, Any] = 0 for src, pred, refs in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): sari_score += SARIsent(normalize(SCREAMING_SNAKE_CASE__ ), normalize(SCREAMING_SNAKE_CASE__ ), [normalize(SCREAMING_SNAKE_CASE__ ) for sent in refs] ) UpperCAmelCase_ : Optional[int] = sari_score / len(SCREAMING_SNAKE_CASE__ ) return 100 * sari_score def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Optional[int]="exp", SCREAMING_SNAKE_CASE__ : List[str]=None, SCREAMING_SNAKE_CASE__ : Optional[int]=False, SCREAMING_SNAKE_CASE__ : List[str]=False, SCREAMING_SNAKE_CASE__ : Optional[int]=False, ) -> str: UpperCAmelCase_ : Any = len(references[0] ) if any(len(SCREAMING_SNAKE_CASE__ ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) UpperCAmelCase_ : int = [[refs[i] for refs 
in references] for i in range(SCREAMING_SNAKE_CASE__ )] UpperCAmelCase_ : int = sacrebleu.corpus_bleu( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, smooth_method=SCREAMING_SNAKE_CASE__, smooth_value=SCREAMING_SNAKE_CASE__, force=SCREAMING_SNAKE_CASE__, lowercase=SCREAMING_SNAKE_CASE__, use_effective_order=SCREAMING_SNAKE_CASE__, ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a (datasets.Metric ): def UpperCAmelCase__ ( self : Optional[int] ) -> str: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = {} result.update({'''sari''': compute_sari(sources=__magic_name__ , predictions=__magic_name__ , references=__magic_name__ )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=__magic_name__ , references=__magic_name__ )} ) result.update({'''exact''': compute_em(predictions=__magic_name__ , references=__magic_name__ )} ) return result
644
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __a (lowerCamelCase , unittest.TestCase ): __a : List[str] = BlenderbotSmallTokenizer __a : List[Any] = False def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" super().setUp() UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = '''adapt act apte''' UpperCAmelCase_ : Tuple = '''adapt act apte''' return input_text, output_text def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ : List[Any] = '''adapt act apte''' UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te'''] UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] UpperCAmelCase_ : Optional[int] = '''I am a small frog.''' UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) UpperCAmelCase_ : List[Any] = '''I am a small frog .''' UpperCAmelCase_ : Any = '''.''' UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
644
1
from __future__ import annotations

from random import random
from typing import Generic, TypeVar

KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of forward references from this node."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Random level in [1, self.max_level]; higher levels are less likely."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return the node holding key (or None) and, per level, the node
        directly preceding the searched position."""
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially
            # have to be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to the removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through the new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT) -> VT | None:
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None


def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
644
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = get_activation('''swish''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[int] = get_activation('''mish''' ) self.assertIsInstance(__magic_name__ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = get_activation('''gelu''' ) self.assertIsInstance(__magic_name__ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
644
1
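The skip-list record above relies on a `random_level` helper that falls outside this excerpt. As a minimal, self-contained sketch of that mechanism (the promotion probability of 0.5 and the level cap are assumptions, not values taken from the record):

import random


def random_level(max_level: int = 16, p: float = 0.5) -> int:
    # Flip a biased coin until it fails; each success promotes the node one level.
    level = 1
    while random.random() < p and level < max_level:
        level += 1
    return level


# Levels follow a geometric distribution, so most nodes stay at level 1.
print(max(random_level() for _ in range(1000)))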
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging


if is_torch_available():
    import torch

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "AI-Sweden/gpt-sw3-126m": "https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-350m": "https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-1.6b": "https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-6.7b": "https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model",
        "AI-Sweden/gpt-sw3-20b": "https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "AI-Sweden/gpt-sw3-126m": 2048,
    "AI-Sweden/gpt-sw3-350m": 2048,
    "AI-Sweden/gpt-sw3-1.6b": 2048,
    "AI-Sweden/gpt-sw3-6.7b": 2048,
    "AI-Sweden/gpt-sw3-20b": 2048,
}


class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", "„"}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
644
'''simple docstring'''
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
644
1
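A quick way to see what the GPT-SW3 tokenizer's `preprocess_text` normalization does is a standalone sketch that mirrors the regex construction from the record above (the sample string is an assumption for illustration):

import re
import unicodedata

# Same character ranges as the tokenizer's non_printing_characters_re above.
non_printing = re.compile(
    f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
)

sample = "hello\x07\u00adworld\u00a0!"  # bell char, soft hyphen, and non-breaking space
sample = non_printing.sub("", sample)  # all three fall inside the removed ranges
sample = unicodedata.normalize("NFC", sample)
print(sample)  # -> "helloworld!"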
'''simple docstring'''
from . import __version__

# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
    CLOUDFRONT_DISTRIB_PREFIX,
    CONFIG_NAME,
    DISABLE_TELEMETRY,
    DUMMY_INPUTS,
    DUMMY_MASK,
    ENV_VARS_TRUE_AND_AUTO_VALUES,
    ENV_VARS_TRUE_VALUES,
    FEATURE_EXTRACTOR_NAME,
    FLAX_WEIGHTS_NAME,
    HF_MODULES_CACHE,
    HUGGINGFACE_CO_PREFIX,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    MODEL_CARD_NAME,
    MULTIPLE_CHOICE_DUMMY_INPUTS,
    PYTORCH_PRETRAINED_BERT_CACHE,
    PYTORCH_TRANSFORMERS_CACHE,
    S3_BUCKET_PREFIX,
    SENTENCEPIECE_UNDERLINE,
    SPIECE_UNDERLINE,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    TORCH_FX_REQUIRED_VERSION,
    TRANSFORMERS_CACHE,
    TRANSFORMERS_DYNAMIC_MODULE_NAME,
    USE_JAX,
    USE_TF,
    USE_TORCH,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    ContextManagers,
    DummyObject,
    EntryNotFoundError,
    ExplicitEnum,
    ModelOutput,
    PaddingStrategy,
    PushToHubMixin,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    TensorType,
    _LazyModule,
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    cached_property,
    copy_func,
    default_cache_path,
    define_sagemaker_information,
    get_cached_models,
    get_file_from_repo,
    get_full_repo_name,
    get_torch_version,
    has_file,
    http_user_agent,
    is_apex_available,
    is_bs4_available,
    is_coloredlogs_available,
    is_datasets_available,
    is_detectron2_available,
    is_faiss_available,
    is_flax_available,
    is_ftfy_available,
    is_in_notebook,
    is_ipex_available,
    is_librosa_available,
    is_offline_mode,
    is_onnx_available,
    is_pandas_available,
    is_phonemizer_available,
    is_protobuf_available,
    is_psutil_available,
    is_py3nvml_available,
    is_pyctcdecode_available,
    is_pytesseract_available,
    is_pytorch_quantization_available,
    is_rjieba_available,
    is_sagemaker_dp_enabled,
    is_sagemaker_mp_enabled,
    is_scipy_available,
    is_sentencepiece_available,
    is_seqio_available,
    is_sklearn_available,
    is_soundfile_availble,
    is_spacy_available,
    is_speech_available,
    is_tensor,
    is_tensorflow_probability_available,
    is_tf2onnx_available,
    is_tf_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torch_bf16_available,
    is_torch_cuda_available,
    is_torch_fx_available,
    is_torch_fx_proxy,
    is_torch_mps_available,
    is_torch_tf32_available,
    is_torch_tpu_available,
    is_torchaudio_available,
    is_training_run_on_sagemaker,
    is_vision_available,
    replace_return_docstrings,
    requires_backends,
    to_numpy,
    to_py_obj,
    torch_only_method,
)
644
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    # Count n-digit positive integers that are also an n-th power.
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
644
1
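As a quick cross-check of the digit-count condition used in the Project Euler 63 solution above, the qualifying pairs can be enumerated directly (the published answer for this problem is 49):

# Count n-digit integers that are also n-th powers, e.g. 8**9 = 134217728 has 9 digits.
hits = [
    (base, power)
    for power in range(1, 22)
    for base in range(1, 10)
    if len(str(base**power)) == power
]
print(len(hits))  # -> 49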
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string end after the previously explored end (that is r)?
        # if yes, then update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
644
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
644
1
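A naive quadratic longest-palindrome finder is a handy cross-check for the linear-time Manacher implementation in the record above (a minimal sketch; the sample string is an assumption):

def longest_palindrome_naive(s: str) -> str:
    # Try every substring, longest first, and return the first palindrome found.
    for size in range(len(s), 0, -1):
        for start in range(len(s) - size + 1):
            candidate = s[start : start + size]
            if candidate == candidate[::-1]:
                return candidate
    return ""


print(longest_palindrome_naive("forgeeksskeegfor"))  # -> "geeksskeeg"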
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()


class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass


@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )

        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
644
'''simple docstring'''
from collections.abc import Iterable
from typing import Any


class Node:
    def __init__(self, value: int | None = None):
        self.value = value
        self.parent: Node | None = None  # Added in order to delete a node easier
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)


class BinarySearchTree:
    def __init__(self, root: Node | None = None):
        self.root = root

    def __str__(self) -> str:
        return str(self.root)

    def __reassign_nodes(self, node: Node, new_children: Node | None) -> None:
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right children
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node: Node) -> bool:
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self) -> bool:
        return self.root is None

    def __insert(self, value) -> None:
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values) -> None:
        for value in values:
            self.__insert(value)

    def search(self, value) -> Node | None:
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node: Node | None = None) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            # We go deep on the right branch
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node: Node | None = None) -> Node | None:
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            # We go deep on the left branch
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value: int) -> None:
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                tmp_node = self.get_max(
                    node.left
                )  # Gets the max value of the left branch
                self.remove(tmp_node.value)  # type: ignore
                node.value = (
                    tmp_node.value  # type: ignore
                )  # Assigns the value to the node to delete and keep tree structure

    def preorder_traverse(self, node: Node | None) -> Iterable:
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None) -> Any:
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr: list, node: Node | None) -> None:
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k: int, node: Node) -> int:
        arr: list[int] = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]


def postorder(curr_node: Node | None) -> list[Node]:
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list


def binary_search_tree() -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
        print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
644
1
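For intuition about why the in-order traversal in the binary search tree record yields sorted values, here is a minimal standalone sketch (the dict-based tree and names are illustrative, not taken from the record):

def inorder(node, out):
    # Left subtree, then the node itself, then right subtree:
    # for a BST this visits values in ascending order.
    if node is not None:
        inorder(node.get("left"), out)
        out.append(node["value"])
        inorder(node.get("right"), out)


# Tiny hand-built tree:   8
#                        / \
#                       3   10
tree = {"value": 8, "left": {"value": 3}, "right": {"value": 10}}
out: list[int] = []
inorder(tree, out)
print(out)  # -> [3, 8, 10]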
'''simple docstring'''
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
644
'''simple docstring'''
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    # Midpoint of the segment between p1 and p2.
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
644
1
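The Sierpinski recursion above only ever needs the midpoint helper, so the subdivision step can be checked without turtle graphics (a minimal sketch using the same starting vertices as the record):

def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


# One subdivision of the triangle used in the record above.
vertices = [(-175.0, -125.0), (0.0, 175.0), (175.0, -125.0)]
mids = [get_mid(vertices[i], vertices[(i + 1) % 3]) for i in range(3)]
print(mids)  # -> [(-87.5, 25.0), (87.5, 25.0), (0.0, -125.0)]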
'''simple docstring'''
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
            assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
            assert (
                resumed_state["lr"] == lr_scheduler.get_lr()[0]
            ), "Scheduler learning rate mismatch, loading from checkpoint failed"
            assert (
                resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
            ), "Optimizer learning rate mismatch, loading from checkpoint failed"
            assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
            return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)


if __name__ == "__main__":
    main()
644
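# A minimal, self-contained sketch of the checkpoint-resumption bookkeeping used
# in the training script above, restated with plain names; the folder layout
# ("epoch_N") and the JSON state files are taken from that script, everything
# else is illustrative.
import json
import os


def parse_resume_epoch(checkpoint_dir: str) -> int:
    # "outputs/epoch_4" -> training resumes at epoch 5
    digits = ""
    for char in checkpoint_dir.split("epoch_")[1]:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits) + 1


def save_epoch_state(output_dir: str, epoch: int, state: dict) -> None:
    with open(os.path.join(output_dir, f"state_{epoch}.json"), "w") as f:
        json.dump(state, f)


def load_epoch_state(output_dir: str, epoch: int) -> dict:
    with open(os.path.join(output_dir, f"state_{epoch}.json")) as f:
        return json.load(f)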
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case_ : List[str] = False class __a (unittest.TestCase ): pass @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = generator.manual_seed(0 ) UpperCAmelCase_ : Dict = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077''' UpperCAmelCase_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe.dual_guided( prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger ''' UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pipe.text_to_image( prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 
0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
644
1
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case_ : Optional[Any] = logging.get_logger(__name__) snake_case_ : Tuple = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class __a (lowerCamelCase ): __a : Tuple = "levit" def __init__( self : List[str] , __magic_name__ : Optional[Any]=2_24 , __magic_name__ : Tuple=3 , __magic_name__ : List[str]=3 , __magic_name__ : Optional[int]=2 , __magic_name__ : Optional[Any]=1 , __magic_name__ : Optional[Any]=16 , __magic_name__ : str=[1_28, 2_56, 3_84] , __magic_name__ : Union[str, Any]=[4, 8, 12] , __magic_name__ : int=[4, 4, 4] , __magic_name__ : List[Any]=[16, 16, 16] , __magic_name__ : Dict=0 , __magic_name__ : Union[str, Any]=[2, 2, 2] , __magic_name__ : int=[2, 2, 2] , __magic_name__ : Any=0.0_2 , **__magic_name__ : Union[str, Any] , ) -> Dict: """simple docstring""" super().__init__(**__magic_name__ ) UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : str = num_channels UpperCAmelCase_ : Tuple = kernel_size UpperCAmelCase_ : int = stride UpperCAmelCase_ : List[Any] = padding UpperCAmelCase_ : int = hidden_sizes UpperCAmelCase_ : int = num_attention_heads UpperCAmelCase_ : Union[str, Any] = depths UpperCAmelCase_ : List[Any] = key_dim UpperCAmelCase_ : Tuple = drop_path_rate UpperCAmelCase_ : Optional[Any] = patch_size UpperCAmelCase_ : Tuple = attention_ratio UpperCAmelCase_ : Union[str, Any] = mlp_ratio UpperCAmelCase_ : Dict = initializer_range UpperCAmelCase_ : Tuple = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class __a (lowerCamelCase ): __a : Optional[Any] = version.parse("1.11" ) @property def UpperCAmelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Any ) -> float: """simple docstring""" return 1E-4
644
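# A minimal sketch of the configuration-subclass pattern used by LevitConfig
# above, assuming transformers is installed; the model type and fields below
# are illustrative, not a real model.
from transformers import PretrainedConfig


class TinyVisionConfig(PretrainedConfig):
    model_type = "tiny-vision"

    def __init__(self, image_size=224, num_channels=3, hidden_sizes=(128, 256, 384), **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.hidden_sizes = list(hidden_sizes)


config = TinyVisionConfig(image_size=192)
config.save_pretrained("./tiny-vision")  # serializes the fields to config.json
assert TinyVisionConfig.from_pretrained("./tiny-vision").image_size == 192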
'''simple docstring''' snake_case_ : int = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
644
1
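# A minimal sketch of how a pin table like the one above can be checked against
# the installed environment, assuming `packaging` is available; the two pins are
# copied from the table, the rest is illustrative.
from importlib.metadata import PackageNotFoundError, version

from packaging.requirements import Requirement

for pin in ("torch>=1.4", "transformers>=4.25.1"):
    req = Requirement(pin)
    try:
        installed = version(req.name)
    except PackageNotFoundError:
        print(f"{req.name}: not installed")
        continue
    status = "satisfies" if req.specifier.contains(installed, prereleases=True) else "violates"
    print(f"{req.name} {installed} {status} {req.specifier}")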
'''simple docstring''' import io import itertools import json from dataclasses import dataclass from typing import Optional import pyarrow as pa import pyarrow.json as paj import datasets from datasets.table import table_cast from datasets.utils.file_utils import readline snake_case_ : Dict = datasets.utils.logging.get_logger(__name__) @dataclass class __a (datasets.BuilderConfig ): __a : Optional[datasets.Features] = None __a : str = "utf-8" __a : Optional[str] = None __a : Optional[str] = None __a : bool = True # deprecated __a : Optional[int] = None # deprecated __a : int = 10 << 20 # 10MB __a : Optional[bool] = None class __a (datasets.ArrowBasedBuilder ): __a : Optional[int] = JsonConfig def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" if self.config.block_size is not None: logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' ) UpperCAmelCase_ : Optional[int] = self.config.block_size if self.config.use_threads is not True: logger.warning( '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' ) if self.config.newlines_in_values is not None: raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' ) return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[str] ) -> Tuple: """simple docstring""" if not self.config.data_files: raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) UpperCAmelCase_ : int = dl_manager.download_and_extract(self.config.data_files ) if isinstance(__magic_name__ , (str, list, tuple) ): UpperCAmelCase_ : Tuple = data_files if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase_ : Optional[int] = [files] UpperCAmelCase_ : Any = [dl_manager.iter_files(__magic_name__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] UpperCAmelCase_ : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(__magic_name__ , __magic_name__ ): UpperCAmelCase_ : List[Any] = [files] UpperCAmelCase_ : Any = [dl_manager.iter_files(__magic_name__ ) for file in files] splits.append(datasets.SplitGenerator(name=__magic_name__ , gen_kwargs={'''files''': files} ) ) return splits def UpperCAmelCase__ ( self : List[str] , __magic_name__ : pa.Table ) -> pa.Table: """simple docstring""" if self.config.features is not None: # adding missing columns for column_name in set(self.config.features ) - set(pa_table.column_names ): UpperCAmelCase_ : int = self.config.features.arrow_schema.field(__magic_name__ ).type UpperCAmelCase_ : Optional[int] = pa_table.append_column(__magic_name__ , pa.array([None] * len(__magic_name__ ) , type=__magic_name__ ) ) # more expensive cast to support nested structures with keys in a different order # allows str <-> int/float or str to Audio for example UpperCAmelCase_ : Optional[Any] = table_cast(__magic_name__ , self.config.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Tuple ) -> List[Any]: """simple docstring""" for file_idx, file in enumerate(itertools.chain.from_iterable(__magic_name__ ) ): # If the file is one json object and if we need to look at the list of items in one specific field if self.config.field is not None: with open(__magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: UpperCAmelCase_ : 
List[str] = json.load(__magic_name__ ) # We keep only the field we are interested in UpperCAmelCase_ : Any = dataset[self.config.field] # We accept two format: a list of dicts or a dict of lists if isinstance(__magic_name__ , (list, tuple) ): UpperCAmelCase_ : int = set().union(*[row.keys() for row in dataset] ) UpperCAmelCase_ : List[Any] = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} else: UpperCAmelCase_ : Optional[int] = dataset UpperCAmelCase_ : int = pa.Table.from_pydict(__magic_name__ ) yield file_idx, self._cast_table(__magic_name__ ) # If the file has one json object per line else: with open(__magic_name__ , '''rb''' ) as f: UpperCAmelCase_ : Optional[Any] = 0 # Use block_size equal to the chunk size divided by 32 to leverage multithreading # Set a default minimum value of 16kB if the chunk size is really small UpperCAmelCase_ : List[Any] = max(self.config.chunksize // 32 , 16 << 10 ) UpperCAmelCase_ : List[Any] = ( self.config.encoding_errors if self.config.encoding_errors is not None else '''strict''' ) while True: UpperCAmelCase_ : List[Any] = f.read(self.config.chunksize ) if not batch: break # Finish current line try: batch += f.readline() except (AttributeError, io.UnsupportedOperation): batch += readline(__magic_name__ ) # PyArrow only accepts utf-8 encoded bytes if self.config.encoding != "utf-8": UpperCAmelCase_ : List[Any] = batch.decode(self.config.encoding , errors=__magic_name__ ).encode('''utf-8''' ) try: while True: try: UpperCAmelCase_ : Union[str, Any] = paj.read_json( io.BytesIO(__magic_name__ ) , read_options=paj.ReadOptions(block_size=__magic_name__ ) ) break except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e: if ( isinstance(__magic_name__ , pa.ArrowInvalid ) and "straddling" not in str(__magic_name__ ) or block_size > len(__magic_name__ ) ): raise else: # Increase the block size in case it was too small. # The block size will be reset for the next file. logger.debug( F"""Batch of {len(__magic_name__ )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" ) block_size *= 2 except pa.ArrowInvalid as e: try: with open( __magic_name__ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f: UpperCAmelCase_ : int = json.load(__magic_name__ ) except json.JSONDecodeError: logger.error(F"""Failed to read file '{file}' with error {type(__magic_name__ )}: {e}""" ) raise e # If possible, parse the file as a list of json objects and exit the loop if isinstance(__magic_name__ , __magic_name__ ): # list is the only sequence type supported in JSON try: UpperCAmelCase_ : str = set().union(*[row.keys() for row in dataset] ) UpperCAmelCase_ : Optional[int] = {col: [row.get(__magic_name__ ) for row in dataset] for col in keys} UpperCAmelCase_ : Union[str, Any] = pa.Table.from_pydict(__magic_name__ ) except (pa.ArrowInvalid, AttributeError) as e: logger.error(F"""Failed to read file '{file}' with error {type(__magic_name__ )}: {e}""" ) raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None yield file_idx, self._cast_table(__magic_name__ ) break else: logger.error(F"""Failed to read file '{file}' with error {type(__magic_name__ )}: {e}""" ) raise ValueError( F"""Not able to read records in the JSON file at {file}. """ F"""You should probably indicate the field of the JSON file containing your records. """ F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. 
""" F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(__magic_name__ ) batch_idx += 1
644
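# A standard-library sketch of the chunked reading trick the JSON loader above
# relies on: read a fixed-size chunk, then extend it to the end of the current
# line so no JSON record is ever split across two batches. The chunk size and
# function name are illustrative.
import json


def iter_jsonl_records(path: str, chunksize: int = 10 << 20):
    with open(path, "rb") as f:
        while True:
            batch = f.read(chunksize)
            if not batch:
                break
            batch += f.readline()  # finish the (possibly truncated) current line
            for line in batch.splitlines():
                if line.strip():
                    yield json.loads(line)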
'''simple docstring''' import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __a (unittest.TestCase ): @property def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet UpperCAmelCase_ : Dict = KarrasVeScheduler() UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0] UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256''' UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = KarrasVeScheduler() UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
644
1
'''simple docstring''' from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case_ : Tuple = { "configuration_informer": [ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "InformerConfig", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Any = [ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "InformerForPrediction", "InformerModel", "InformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys snake_case_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
644
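# A minimal sketch of the lazy-import idea behind `_LazyModule`, expressed with
# the module-level `__getattr__` hook from PEP 562 instead of the transformers
# helper; the mapping below mirrors the import structure above and only works
# when this file lives inside a package.
import importlib

_LAZY = {
    "InformerConfig": ".configuration_informer",
    "InformerModel": ".modeling_informer",
}


def __getattr__(name):
    # import the submodule only when the attribute is first accessed
    if name in _LAZY:
        module = importlib.import_module(_LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")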
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class __a (lowerCamelCase ): __a : List[Any] = "openai/whisper-base" __a : Optional[Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __a : Any = "transcriber" __a : str = WhisperProcessor __a : List[Any] = WhisperForConditionalGeneration __a : int = ["audio"] __a : Optional[Any] = ["text"] def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]: """simple docstring""" return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple: """simple docstring""" return self.model.generate(inputs=__magic_name__ ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str: """simple docstring""" return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
644
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case_ : Dict = { "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : str = [ "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST", "Swinv2ForImageClassification", "Swinv2ForMaskedImageModeling", "Swinv2Model", "Swinv2PreTrainedModel", ] if TYPE_CHECKING: from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swinva import ( SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST, SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel, SwinvaPreTrainedModel, ) else: import sys snake_case_ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
644
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int: return abs(SCREAMING_SNAKE_CASE__ ) if a == 0 else greatest_common_divisor(b % a, SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int: while y: # --> when y=0 the loop terminates and x is returned as the final GCD. UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = y, x % y return abs(SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> Optional[int]: try: UpperCAmelCase_ : Optional[Any] = input('''Enter two integers separated by comma (,): ''' ).split(''',''' ) UpperCAmelCase_ : Optional[int] = int(nums[0] ) UpperCAmelCase_ : List[Any] = int(nums[1] ) print( F"""greatest_common_divisor({num_a}, {num_a}) = """ F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" ) print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" ) except (IndexError, UnboundLocalError, ValueError): print('''Wrong input''' ) if __name__ == "__main__": main()
644
1
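# A quick cross-check of the two GCD formulations above, restated with plain
# names and verified against the standard library; the test pairs are illustrative.
import math


def gcd_recursive(a: int, b: int) -> int:
    return abs(b) if a == 0 else gcd_recursive(b % a, a)


def gcd_iterative(x: int, y: int) -> int:
    while y:  # when y reaches 0, x holds the GCD
        x, y = y, x % y
    return abs(x)


for a, b in [(0, 0), (12, 18), (-4, 6), (270, 192)]:
    assert gcd_recursive(a, b) == gcd_iterative(a, b) == math.gcd(a, b)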
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str: """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : List[str] = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : List[str] = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : int = type_vocab_size UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : List[str] = scope UpperCAmelCase_ : List[str] = range_bbox def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase_ : List[str] = bbox[i, j, 3] UpperCAmelCase_ : Dict = bbox[i, j, 1] UpperCAmelCase_ : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase_ : List[str] = bbox[i, j, 2] UpperCAmelCase_ : Tuple = bbox[i, j, 0] UpperCAmelCase_ : Union[str, Any] = t UpperCAmelCase_ : int = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) 
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Tuple = { '''input_ids''': input_ids, 
'''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Tuple = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __a : Any = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __a : Union[str, Any] = False __a : int = False def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str: """simple docstring""" return True def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = LiltModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : Tuple = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_torch @slow class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ ) UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ ) UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ ) UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] ) UpperCAmelCase_ : List[str] = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , ) self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
644
1
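# A vectorized sketch of the bounding-box "legalization" done element-by-element
# in the model tester above: each box is (x0, y0, x1, y1) and corners are swapped
# so that x0 <= x1 and y0 <= y1; the shapes below are illustrative.
import torch


def legalize_bboxes(bbox: torch.Tensor) -> torch.Tensor:
    x0 = torch.minimum(bbox[..., 0], bbox[..., 2])
    x1 = torch.maximum(bbox[..., 0], bbox[..., 2])
    y0 = torch.minimum(bbox[..., 1], bbox[..., 3])
    y1 = torch.maximum(bbox[..., 1], bbox[..., 3])
    return torch.stack([x0, y0, x1, y1], dim=-1)


boxes = torch.randint(0, 1000, (2, 7, 4))
fixed = legalize_bboxes(boxes)
assert (fixed[..., 2] >= fixed[..., 0]).all() and (fixed[..., 3] >= fixed[..., 1]).all()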
'''simple docstring''' class __a : def __init__( self : Tuple , __magic_name__ : Dict , __magic_name__ : Optional[int] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = name UpperCAmelCase_ : Union[str, Any] = val def __str__( self : str ) -> Tuple: """simple docstring""" return F"""{self.__class__.__name__}({self.name}, {self.val})""" def __lt__( self : int , __magic_name__ : str ) -> Tuple: """simple docstring""" return self.val < other.val class __a : def __init__( self : Dict , __magic_name__ : List[Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : Tuple = {} UpperCAmelCase_ : Any = {} UpperCAmelCase_ : Optional[Any] = self.build_heap(__magic_name__ ) def __getitem__( self : Tuple , __magic_name__ : Union[str, Any] ) -> Optional[int]: """simple docstring""" return self.get_value(__magic_name__ ) def UpperCAmelCase__ ( self : int , __magic_name__ : List[Any] ) -> Any: """simple docstring""" return (idx - 1) // 2 def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return idx * 2 + 1 def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple ) -> Optional[int]: """simple docstring""" return idx * 2 + 2 def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> List[str]: """simple docstring""" return self.heap_dict[key] def UpperCAmelCase__ ( self : str , __magic_name__ : int ) -> Dict: """simple docstring""" UpperCAmelCase_ : int = len(__magic_name__ ) - 1 UpperCAmelCase_ : Dict = self.get_parent_idx(__magic_name__ ) for idx, i in enumerate(__magic_name__ ): UpperCAmelCase_ : Dict = idx UpperCAmelCase_ : Tuple = i.val for i in range(__magic_name__ , -1 , -1 ): self.sift_down(__magic_name__ , __magic_name__ ) return array def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : List[Any] ) -> List[Any]: """simple docstring""" while True: UpperCAmelCase_ : Optional[int] = self.get_left_child_idx(__magic_name__ ) # noqa: E741 UpperCAmelCase_ : Tuple = self.get_right_child_idx(__magic_name__ ) UpperCAmelCase_ : str = idx if l < len(__magic_name__ ) and array[l] < array[idx]: UpperCAmelCase_ : str = l if r < len(__magic_name__ ) and array[r] < array[smallest]: UpperCAmelCase_ : Tuple = r if smallest != idx: UpperCAmelCase_ , UpperCAmelCase_ : Dict = array[smallest], array[idx] ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Union[str, Any] = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) UpperCAmelCase_ : str = smallest else: break def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.get_parent_idx(__magic_name__ ) while p >= 0 and self.heap[p] > self.heap[idx]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.heap[idx], self.heap[p] UpperCAmelCase_ , UpperCAmelCase_ : Dict = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) UpperCAmelCase_ : int = p UpperCAmelCase_ : Union[str, Any] = self.get_parent_idx(__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Any: """simple docstring""" return self.heap[0] def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.heap[-1], self.heap[0] UpperCAmelCase_ , UpperCAmelCase_ : Dict = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) UpperCAmelCase_ : Dict = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 , self.heap ) return x def UpperCAmelCase__ ( self : 
Optional[Any] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" self.heap.append(__magic_name__ ) UpperCAmelCase_ : str = len(self.heap ) - 1 UpperCAmelCase_ : Any = node.val self.sift_up(len(self.heap ) - 1 ) def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" return len(self.heap ) == 0 def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less than current value" UpperCAmelCase_ : Any = new_value UpperCAmelCase_ : List[Any] = new_value self.sift_up(self.idx_of_element[node] ) snake_case_ : Optional[Any] = Node("R", -1) snake_case_ : Union[str, Any] = Node("B", 6) snake_case_ : List[str] = Node("A", 3) snake_case_ : Optional[Any] = Node("X", 1) snake_case_ : List[str] = Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array snake_case_ : Tuple = MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
644
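# The MinHeap class above supports decrease_key directly; a common
# standard-library alternative is heapq with "lazy deletion", sketched here
# with illustrative names.
import heapq
import itertools

heap, entries, counter = [], {}, itertools.count()


def push(name, val):
    entry = [val, next(counter), name]
    entries[name] = entry
    heapq.heappush(heap, entry)


def decrease_key(name, new_val):
    entries[name][-1] = None  # invalidate the old entry instead of removing it
    push(name, new_val)


def pop():
    while heap:
        val, _, name = heapq.heappop(heap)
        if name is not None:  # skip invalidated entries
            del entries[name]
            return name, val
    raise KeyError("pop from empty heap")


push("B", 6)
push("A", 3)
push("X", 1)
decrease_key("B", -17)
assert pop() == ("B", -17)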
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ : str = logging.get_logger(__name__) snake_case_ : int = "▁" snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} snake_case_ : int = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } snake_case_ : Optional[Any] = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } snake_case_ : Dict = { "ernie-m-base": 5_14, "ernie-m-large": 5_14, } snake_case_ : Any = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class __a (lowerCamelCase ): __a : List[str] = ["input_ids"] __a : Union[str, Any] = VOCAB_FILES_NAMES __a : Tuple = PRETRAINED_INIT_CONFIGURATION __a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __a : Union[str, Any] = RESOURCE_FILES_NAMES def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) UpperCAmelCase_ : Optional[Any] = do_lower_case UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__magic_name__ ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ ) else: UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )} UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()} def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any: """simple docstring""" if text is None: return None UpperCAmelCase_ : str = self.tokenize(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', [] for i, ch in enumerate(__magic_name__ ): if ch in self.SP_CHAR_MAPPING: UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ ) else: UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ ) if self.is_whitespace(__magic_name__ ): continue normalized_text += ch char_mapping.extend([i] * len(__magic_name__ ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0 if self.do_lower_case: UpperCAmelCase_ : Optional[int] = text.lower() for token in split_tokens: if token[:1] == "▁": UpperCAmelCase_ : Tuple = token[1:] UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) UpperCAmelCase_ : int = end return token_mapping @property def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" return len(self.vocab ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.__dict__.copy() UpperCAmelCase_ : Optional[Any] = None return state def __setstate__( self : str , __magic_name__ : Any ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ : int = {} UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]: """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]: """simple docstring""" if self.sp_model_kwargs.get('''enable_sampling''' ) is True: UpperCAmelCase_ : Dict = True if self.sp_model_kwargs.get('''alpha''' ) is not None: UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' 
) if not enable_sampling: UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ ) else: UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : List[Any] = [] for pi, piece in enumerate(__magic_name__ ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0: new_pieces.append(__magic_name__ ) continue else: continue UpperCAmelCase_ : List[str] = 0 for i, chunk in enumerate(__magic_name__ ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__magic_name__ ) UpperCAmelCase_ : List[Any] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : List[str] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : str = i if len(__magic_name__ ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]: """simple docstring""" return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.reverse_vocab.get(__magic_name__ , self.unk_token ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id] UpperCAmelCase_ : List[Any] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ 
: Optional[List[int]] = None ) -> List[int]: """simple docstring""" # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(__magic_name__ ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3) def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple: """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str: """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__magic_name__ ) == 1: UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ ) if cat == "Zs": return True return False def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = {} with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(__magic_name__ ): UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' ) UpperCAmelCase_ : Dict = int(__magic_name__ ) return token_to_idx def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = 0 if os.path.isdir(__magic_name__ ): UpperCAmelCase_ : Any = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) UpperCAmelCase_ : Dict = token_index writer.write(token + '''\n''' ) index += 1 UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' ) with open(__magic_name__ , '''wb''' ) as fi: UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__magic_name__ ) return (vocab_file,)
644
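# A minimal sketch of the special-token layout the tokenizer above builds for
# sentence pairs, [CLS] A [SEP] [SEP] B [SEP], with the segment-id split used by
# its create_token_type_ids method; the token ids below are placeholders.
CLS, SEP = 0, 2


def build_pair(ids_a, ids_b):
    input_ids = [CLS] + ids_a + [SEP] + [SEP] + ids_b + [SEP]
    token_type_ids = [0] * (len(ids_a) + 1) + [1] * (len(ids_b) + 3)
    return input_ids, token_type_ids


ids, types = build_pair([11, 12], [21, 22, 23])
assert len(ids) == len(types) == 9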
1
'''simple docstring''' import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import numpy as np from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") snake_case_ : str = logging.getLogger(__name__) @dataclass class __a : __a : Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __a : bool = field( default=lowerCamelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) __a : bool = field( default=lowerCamelCase , metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) } , ) __a : Optional[int] = field( default=lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) __a : Optional[int] = field( default=lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) __a : Optional[int] = field( default=lowerCamelCase , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) } , ) @dataclass class __a : __a : str = field( default=lowerCamelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __a : str = field( default=lowerCamelCase , metadata={"help": "Evaluation language. 
Also train language if `train_language` is set to None."} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Train language if it is different from the evaluation language."} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) __a : Optional[bool] = field( default=lowerCamelCase , metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"} , ) __a : bool = field( default=lowerCamelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) __a : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) __a : bool = field( default=lowerCamelCase , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) __a : bool = field( default=lowerCamelCase , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def lowerCamelCase_ ( ) -> Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase_ : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_xnli''', SCREAMING_SNAKE_CASE__ ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() UpperCAmelCase_ : Optional[Any] = training_args.get_process_log_level() logger.setLevel(SCREAMING_SNAKE_CASE__ ) datasets.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. 
UpperCAmelCase_ : Optional[int] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: UpperCAmelCase_ : Dict = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Set seed before initializing model. set_seed(training_args.seed ) # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # Downloading and loading xnli dataset from the hub. if training_args.do_train: if model_args.train_language is None: UpperCAmelCase_ : Any = load_dataset( '''xnli''', model_args.language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: UpperCAmelCase_ : Union[str, Any] = load_dataset( '''xnli''', model_args.train_language, split='''train''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) UpperCAmelCase_ : Optional[Any] = train_dataset.features['''label'''].names if training_args.do_eval: UpperCAmelCase_ : Dict = load_dataset( '''xnli''', model_args.language, split='''validation''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) UpperCAmelCase_ : Union[str, Any] = eval_dataset.features['''label'''].names if training_args.do_predict: UpperCAmelCase_ : Any = load_dataset( '''xnli''', model_args.language, split='''test''', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) UpperCAmelCase_ : List[Any] = predict_dataset.features['''label'''].names # Labels UpperCAmelCase_ : List[Any] = len(SCREAMING_SNAKE_CASE__ ) # Load pretrained model and tokenizer # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=SCREAMING_SNAKE_CASE__, idalabel={str(SCREAMING_SNAKE_CASE__ ): label for i, label in enumerate(SCREAMING_SNAKE_CASE__ )}, labelaid={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )}, finetuning_task='''xnli''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) UpperCAmelCase_ : str = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) UpperCAmelCase_ : str = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Preprocessing the datasets # Padding strategy if data_args.pad_to_max_length: UpperCAmelCase_ : Dict = '''max_length''' else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch UpperCAmelCase_ : Any = False def preprocess_function(SCREAMING_SNAKE_CASE__ : Tuple ): # Tokenize the texts return tokenizer( examples['''premise'''], examples['''hypothesis'''], padding=SCREAMING_SNAKE_CASE__, max_length=data_args.max_seq_length, truncation=SCREAMING_SNAKE_CASE__, ) if training_args.do_train: if data_args.max_train_samples is not None: UpperCAmelCase_ : List[Any] = min(len(SCREAMING_SNAKE_CASE__ ), data_args.max_train_samples ) UpperCAmelCase_ : Union[str, Any] = train_dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) with training_args.main_process_first(desc='''train dataset map pre-processing''' ): UpperCAmelCase_ : Optional[Any] = train_dataset.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on train dataset''', ) # Log a few random samples from the training set: for index in random.sample(range(len(SCREAMING_SNAKE_CASE__ ) ), 3 ): logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" ) if training_args.do_eval: if data_args.max_eval_samples is not None: UpperCAmelCase_ : str = min(len(SCREAMING_SNAKE_CASE__ ), data_args.max_eval_samples ) UpperCAmelCase_ : Optional[int] = eval_dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) with training_args.main_process_first(desc='''validation dataset map pre-processing''' ): UpperCAmelCase_ : Optional[Any] = eval_dataset.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, load_from_cache_file=not data_args.overwrite_cache, desc='''Running tokenizer on validation dataset''', ) if training_args.do_predict: if data_args.max_predict_samples is not None: UpperCAmelCase_ : Optional[int] = min(len(SCREAMING_SNAKE_CASE__ ), data_args.max_predict_samples ) UpperCAmelCase_ : int = predict_dataset.select(range(SCREAMING_SNAKE_CASE__ ) ) with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ): UpperCAmelCase_ : List[Any] = predict_dataset.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, load_from_cache_file=not 
data_args.overwrite_cache, desc='''Running tokenizer on prediction dataset''', ) # Get the metric function UpperCAmelCase_ : Optional[Any] = evaluate.load('''xnli''' ) # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(SCREAMING_SNAKE_CASE__ : EvalPrediction ): UpperCAmelCase_ : Dict = p.predictions[0] if isinstance(p.predictions, SCREAMING_SNAKE_CASE__ ) else p.predictions UpperCAmelCase_ : Dict = np.argmax(SCREAMING_SNAKE_CASE__, axis=1 ) return metric.compute(predictions=SCREAMING_SNAKE_CASE__, references=p.label_ids ) # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. if data_args.pad_to_max_length: UpperCAmelCase_ : Dict = default_data_collator elif training_args.fpaa: UpperCAmelCase_ : Optional[int] = DataCollatorWithPadding(SCREAMING_SNAKE_CASE__, pad_to_multiple_of=8 ) else: UpperCAmelCase_ : Dict = None # Initialize our Trainer UpperCAmelCase_ : int = Trainer( model=SCREAMING_SNAKE_CASE__, args=SCREAMING_SNAKE_CASE__, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=SCREAMING_SNAKE_CASE__, tokenizer=SCREAMING_SNAKE_CASE__, data_collator=SCREAMING_SNAKE_CASE__, ) # Training if training_args.do_train: UpperCAmelCase_ : str = None if training_args.resume_from_checkpoint is not None: UpperCAmelCase_ : int = training_args.resume_from_checkpoint elif last_checkpoint is not None: UpperCAmelCase_ : Dict = last_checkpoint UpperCAmelCase_ : str = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = train_result.metrics UpperCAmelCase_ : Optional[int] = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase_ : Optional[int] = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics('''train''', SCREAMING_SNAKE_CASE__ ) trainer.save_metrics('''train''', SCREAMING_SNAKE_CASE__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase_ : int = trainer.evaluate(eval_dataset=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[str] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ) trainer.log_metrics('''eval''', SCREAMING_SNAKE_CASE__ ) trainer.save_metrics('''eval''', SCREAMING_SNAKE_CASE__ ) # Prediction if training_args.do_predict: logger.info('''*** Predict ***''' ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = trainer.predict(SCREAMING_SNAKE_CASE__, metric_key_prefix='''predict''' ) UpperCAmelCase_ : List[str] = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase_ : List[str] = min(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ) trainer.log_metrics('''predict''', SCREAMING_SNAKE_CASE__ ) trainer.save_metrics('''predict''', SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = np.argmax(SCREAMING_SNAKE_CASE__, axis=1 ) UpperCAmelCase_ : Optional[Any] = os.path.join(training_args.output_dir, '''predictions.txt''' ) if trainer.is_world_process_zero(): with open(SCREAMING_SNAKE_CASE__, '''w''' ) as writer: 
writer.write('''index\tprediction\n''' ) for index, item in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Any = label_list[item] writer.write(F"""{index}\t{item}\n""" ) if __name__ == "__main__": main()
644
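The script above pads dynamically per batch unless --pad_to_max_length is set, and under fp16 it rounds the batch length up to a multiple of 8 via DataCollatorWithPadding(..., pad_to_multiple_of=8). A minimal framework-free sketch of that padding rule; pad_batch and its parameters are hypothetical names, not part of the script:

```python
def pad_batch(sequences, pad_id=0, multiple_of=None):
    # Dynamic padding: pad only to the longest sequence in this batch,
    # optionally rounded up (e.g. to a multiple of 8 for fp16 tensor cores).
    target = max(len(s) for s in sequences)
    if multiple_of:
        target = -(-target // multiple_of) * multiple_of  # ceiling division
    return [s + [pad_id] * (target - len(s)) for s in sequences]

assert pad_batch([[1, 2, 3], [4]], multiple_of=8) == [
    [1, 2, 3, 0, 0, 0, 0, 0],
    [4, 0, 0, 0, 0, 0, 0, 0],
]
```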
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str: if number > 0: raise ValueError('''input must be a negative integer''' ) UpperCAmelCase_ : Union[str, Any] = len(bin(SCREAMING_SNAKE_CASE__ )[3:] ) UpperCAmelCase_ : Union[str, Any] = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:] UpperCAmelCase_ : Optional[Any] = ( ( '''1''' + '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
644
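The style-context snippet above builds a two's-complement string for a negative integer by subtracting a power of two. The same bit identity, sketched at a fixed width; twos_complement is a hypothetical name:

```python
def twos_complement(number: int, bits: int) -> str:
    # For a negative number, two's complement is the value modulo 2**bits.
    assert number < 0 and -number <= 2 ** (bits - 1)
    return bin(number % (1 << bits))[2:].zfill(bits)

assert twos_complement(-5, 8) == "11111011"
```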
1
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=__magic_name__ , ) assert hasattr(self , '''env''' ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str]=1 ) -> Optional[int]: """simple docstring""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=__magic_name__ , instance_type=self.instance_type , debugger_hook_config=__magic_name__ , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[Any] ) -> Optional[Any]: """simple docstring""" TrainingJobAnalytics(__magic_name__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" # create estimator UpperCAmelCase_ : Dict = self.create_estimator() # run training estimator.fit() # result dataframe UpperCAmelCase_ : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis UpperCAmelCase_ : Union[str, Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) UpperCAmelCase_ : int = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping UpperCAmelCase_ : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __magic_name__ )
644
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(__magic_name__ ) # fails here def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 ) UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 ) UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : int ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
644
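The test above exercises DisjunctiveConstraint: generation must fulfil exactly one of several token sequences, stepping one token at a time and resetting on mismatch. A much-simplified pure-Python model of that stepping logic, not the transformers implementation; Disjunctive is a hypothetical name:

```python
class Disjunctive:
    # Fulfil ANY ONE of several token sequences, one token at a time.
    def __init__(self, seqs):
        self.seqs = seqs
        self.current = []

    def update(self, token):
        n = len(self.current)
        branches = [
            s for s in self.seqs
            if s[:n] == self.current and len(s) > n and s[n] == token
        ]
        if not branches:
            self.current = []           # token fits no branch: reset
            return False, False, True   # (stepped, completed, reset)
        self.current.append(token)
        return True, self.current in branches, False

dc = Disjunctive([[1, 2, 3], [1, 2, 4, 5]])
assert dc.update(1) == (True, False, False)
assert dc.update(2) == (True, False, False)
assert dc.update(4) == (True, False, False)
assert dc.update(5) == (True, True, False)
```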
1
'''simple docstring''' import math def lowerCamelCase_ ( ) -> None: UpperCAmelCase_ : Union[str, Any] = input('''Enter message: ''' ) UpperCAmelCase_ : List[str] = int(input(F"""Enter key [2-{len(SCREAMING_SNAKE_CASE__ ) - 1}]: """ ) ) UpperCAmelCase_ : Any = input('''Encryption/Decryption [e/d]: ''' ) if mode.lower().startswith('''e''' ): UpperCAmelCase_ : Optional[int] = encrypt_message(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) elif mode.lower().startswith('''d''' ): UpperCAmelCase_ : int = decrypt_message(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # Append pipe symbol (vertical bar) to identify spaces at the end. print(F"""Output:\n{text + "|"}""" ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : str ) -> str: UpperCAmelCase_ : Optional[int] = [''''''] * key for col in range(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Optional[Any] = col while pointer < len(SCREAMING_SNAKE_CASE__ ): cipher_text[col] += message[pointer] pointer += key return "".join(SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : str ) -> str: UpperCAmelCase_ : Union[str, Any] = math.ceil(len(SCREAMING_SNAKE_CASE__ ) / key ) UpperCAmelCase_ : int = key UpperCAmelCase_ : Optional[int] = (num_cols * num_rows) - len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = [''''''] * num_cols UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Optional[int] = 0 for symbol in message: plain_text[col] += symbol col += 1 if ( (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes) ): UpperCAmelCase_ : Any = 0 row += 1 return "".join(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
644
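The cipher above is classic columnar transposition: ciphertext column i collects every key-th character starting at offset i, and decryption refills a grid while skipping the unused ("shaded") cells of the last column. A compact round-trip sketch of the same scheme; encrypt and decrypt here are hypothetical standalone names:

```python
import math

def encrypt(message: str, key: int) -> str:
    # Column i collects every key-th character starting at offset i.
    return "".join(message[i::key] for i in range(key))

def decrypt(cipher: str, key: int) -> str:
    num_cols = math.ceil(len(cipher) / key)
    num_shaded = num_cols * key - len(cipher)
    plain = [""] * num_cols
    col = row = 0
    for symbol in cipher:
        plain[col] += symbol
        col += 1
        # Wrap to the next row, skipping the shaded cells at the
        # bottom of the last column.
        if col == num_cols or (col == num_cols - 1 and row >= key - num_shaded):
            col, row = 0, row + 1
    return "".join(plain)

msg = "Common sense is not so common."
assert decrypt(encrypt(msg, 8), 8) == msg
```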
'''simple docstring''' import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None) snake_case_ : Optional[Any] = df.shape[:1][0] # If you're using some other dataset input the target column snake_case_ : Any = df.iloc[:, 1:2] snake_case_ : str = actual_data.values.reshape(len_data, 1) snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data) snake_case_ : List[str] = 10 snake_case_ : Any = 5 snake_case_ : Any = 20 snake_case_ : Tuple = len_data - periods * look_back snake_case_ : str = actual_data[:division] snake_case_ : Optional[int] = actual_data[division - look_back :] snake_case_ ,snake_case_ : Any = [], [] snake_case_ ,snake_case_ : Union[str, Any] = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) snake_case_ : Any = np.array(train_x) snake_case_ : Optional[Any] = np.array(test_x) snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y]) snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y]) snake_case_ : List[Any] = Sequential() model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(1_28, 1))) model.add(Dense(forward_days)) model.compile(loss="mean_squared_error", optimizer="adam") snake_case_ : Dict = model.fit( x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4 ) snake_case_ : Optional[Any] = model.predict(x_test)
644
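The LSTM script above builds its training pairs with a sliding window: look_back past values as input, the next forward_days values as target. The same windowing, sketched as a function with hypothetical names:

```python
import numpy as np

def make_windows(series, look_back, forward_days):
    # Each sample: `look_back` past values; each target: the next `forward_days`.
    x, y = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        x.append(series[i : i + look_back])
        y.append(series[i + look_back : i + look_back + forward_days])
    return np.array(x), np.array(y)

x, y = make_windows(np.arange(20.0), look_back=10, forward_days=5)
assert x.shape == (6, 10) and y.shape == (6, 5)
```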
1
'''simple docstring''' from __future__ import annotations snake_case_ : Any = 8.988E9 # units = N * m^2 * C^-2 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : float, SCREAMING_SNAKE_CASE__ : float, SCREAMING_SNAKE_CASE__ : float, SCREAMING_SNAKE_CASE__ : float ) -> dict[str, float]: UpperCAmelCase_ : int = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if distance < 0: raise ValueError('''Distance cannot be negative''' ) if force == 0: UpperCAmelCase_ : List[Any] = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: UpperCAmelCase_ : Optional[Any] = abs(SCREAMING_SNAKE_CASE__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: UpperCAmelCase_ : List[str] = abs(SCREAMING_SNAKE_CASE__ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: UpperCAmelCase_ : Optional[int] = (COULOMBS_CONSTANT * charge_product / abs(SCREAMING_SNAKE_CASE__ )) ** 0.5 return {"distance": distance} raise ValueError('''Exactly one argument must be 0''' ) if __name__ == "__main__": import doctest doctest.testmod()
644
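The function above inverts Coulomb's law F = k * |q1 * q2| / r^2 for whichever quantity is zero. A quick standalone check of the forward formula with the same constant; coulomb_force is a hypothetical name:

```python
K = 8.988e9  # Coulomb constant, N * m^2 * C^-2

def coulomb_force(q1: float, q2: float, r: float) -> float:
    # F = k * |q1 * q2| / r^2
    return K * abs(q1 * q2) / r**2

# Two 1 C charges 1 m apart feel ~8.99e9 N.
assert coulomb_force(1.0, 1.0, 1.0) == 8.988e9
```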
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1" snake_case_ : Dict = "CompVis/stable-diffusion-v1-2" snake_case_ : Any = "CompVis/stable-diffusion-v1-3" snake_case_ : str = "CompVis/stable-diffusion-v1-4" class __a (lowerCamelCase ): def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str: """simple docstring""" super()._init_() UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Tuple = StableDiffusionPipeline( vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )} def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" self.enable_attention_slicing(__magic_name__ ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , 
callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[Any] , 
__magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(__magic_name__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 UpperCAmelCase_ : int = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
644
1
'''simple docstring''' from __future__ import annotations from typing import Any class __a : def __init__( self : List[str] , __magic_name__ : int = 6 ) -> None: """simple docstring""" UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None self.create_linked_list(__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : Tuple = Node() UpperCAmelCase_ : Optional[int] = current_node UpperCAmelCase_ : int = current_node UpperCAmelCase_ : Optional[int] = current_node for _ in range(1 , __magic_name__ ): UpperCAmelCase_ : str = Node() UpperCAmelCase_ : Dict = current_node UpperCAmelCase_ : Union[str, Any] = previous_node UpperCAmelCase_ : List[str] = current_node UpperCAmelCase_ : List[Any] = self.front UpperCAmelCase_ : Union[str, Any] = previous_node def UpperCAmelCase__ ( self : str ) -> bool: """simple docstring""" return ( self.front == self.rear and self.front is not None and self.front.data is None ) def UpperCAmelCase__ ( self : List[str] ) -> Any | None: """simple docstring""" self.check_can_perform_operation() return self.front.data if self.front else None def UpperCAmelCase__ ( self : Any , __magic_name__ : Any ) -> None: """simple docstring""" if self.rear is None: return self.check_is_full() if not self.is_empty(): UpperCAmelCase_ : Any = self.rear.next if self.rear: UpperCAmelCase_ : List[str] = data def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: UpperCAmelCase_ : Any = self.front.data UpperCAmelCase_ : Optional[Any] = None return data UpperCAmelCase_ : Dict = self.front UpperCAmelCase_ : Dict = old_front.next UpperCAmelCase_ : Optional[Any] = old_front.data UpperCAmelCase_ : List[str] = None return data def UpperCAmelCase__ ( self : Tuple ) -> None: """simple docstring""" if self.is_empty(): raise Exception('''Empty Queue''' ) def UpperCAmelCase__ ( self : str ) -> None: """simple docstring""" if self.rear and self.rear.next == self.front: raise Exception('''Full Queue''' ) class __a : def __init__( self : Optional[int] ) -> None: """simple docstring""" UpperCAmelCase_ : Any | None = None UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None if __name__ == "__main__": import doctest doctest.testmod()
644
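The class above implements a bounded FIFO over a pre-built circular linked list. The same behaviour is often written over a fixed array instead; a minimal sketch with hypothetical names, raising the same "Full Queue"/"Empty Queue" exceptions:

```python
class CircularBuffer:
    """Fixed-capacity FIFO backed by a pre-allocated list."""

    def __init__(self, capacity: int = 6) -> None:
        self.data = [None] * capacity
        self.head = 0  # index of the oldest element
        self.size = 0

    def enqueue(self, item) -> None:
        if self.size == len(self.data):
            raise Exception("Full Queue")
        self.data[(self.head + self.size) % len(self.data)] = item
        self.size += 1

    def dequeue(self):
        if self.size == 0:
            raise Exception("Empty Queue")
        item, self.data[self.head] = self.data[self.head], None
        self.head = (self.head + 1) % len(self.data)
        self.size -= 1
        return item

q = CircularBuffer(2)
q.enqueue("a")
q.enqueue("b")
assert q.dequeue() == "a"
```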
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler snake_case_ : Optional[int] = 16 snake_case_ : Tuple = 32 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict: UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_ : Tuple = datasets.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE__ : str ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' ) # Instantiate dataloaders. UpperCAmelCase_ : str = DataLoader( tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = DataLoader( tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any: model.eval() UpperCAmelCase_ : List[str] = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(SCREAMING_SNAKE_CASE__ ) - 1: UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : List[str] = metric.compute() return eval_metric["accuracy"] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple: # Initialize accelerator UpperCAmelCase_ : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ : int = config['''lr'''] UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] ) UpperCAmelCase_ : Optional[int] = int(config['''seed'''] ) UpperCAmelCase_ : List[str] = int(config['''batch_size'''] ) UpperCAmelCase_ : Optional[int] = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ ) # Instantiate optimizer UpperCAmelCase_ : str = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, ) else: UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_ : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' ) UpperCAmelCase_ : Optional[Any] = num_epochs if args.partial_train_epoch is not None: UpperCAmelCase_ : List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1] UpperCAmelCase_ : int = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1 UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f: UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCAmelCase_ : int = {} for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = outputs.loss UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCAmelCase_ : Tuple = F"""epoch_{epoch}""" UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ ) accelerator.save_state(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = accuracy UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0] UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr'''] UpperCAmelCase_ : Tuple = epoch UpperCAmelCase_ : Dict = overall_step accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> List[str]: UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking 
peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, ) parser.add_argument( '''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', ) parser.add_argument( '''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', ) parser.add_argument( '''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', ) parser.add_argument( '''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', ) UpperCAmelCase_ : Optional[int] = parser.parse_args() UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
644
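The script above accumulates gradients: each mini-batch loss is divided by gradient_accumulation_steps and the optimizer steps only every N batches. A framework-free PyTorch sketch of that pattern (assuming plain torch, no accelerate); every name here is hypothetical:

```python
import torch

def train_with_accumulation(model, optimizer, batches, accumulation_steps=4):
    # Scale each mini-batch loss down, step the optimizer every N batches.
    model.train()
    for step, (x, y) in enumerate(batches):
        loss = torch.nn.functional.mse_loss(model(x), y) / accumulation_steps
        loss.backward()
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()

model = torch.nn.Linear(3, 1)
opt = torch.optim.AdamW(model.parameters(), lr=1e-3)
data = [(torch.randn(8, 3), torch.randn(8, 1)) for _ in range(8)]
train_with_accumulation(model, opt, data)
```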
1
'''simple docstring''' # Imports import numpy as np class __a : def __init__( self : List[Any] , __magic_name__ : Optional[Any]=None , __magic_name__ : int=None , __magic_name__ : Dict=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : Optional[int]=None ) -> str: """simple docstring""" self.set_matricies(red=__magic_name__ , green=__magic_name__ , blue=__magic_name__ , red_edge=__magic_name__ , nir=__magic_name__ ) def UpperCAmelCase__ ( self : int , __magic_name__ : Dict=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : List[str]=None , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=None ) -> Tuple: """simple docstring""" if red is not None: UpperCAmelCase_ : Any = red if green is not None: UpperCAmelCase_ : Union[str, Any] = green if blue is not None: UpperCAmelCase_ : Optional[Any] = blue if red_edge is not None: UpperCAmelCase_ : str = red_edge if nir is not None: UpperCAmelCase_ : List[str] = nir return True def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str]="" , __magic_name__ : Union[str, Any]=None , __magic_name__ : str=None , __magic_name__ : Optional[int]=None , __magic_name__ : Union[str, Any]=None , __magic_name__ : str=None ) -> Dict: """simple docstring""" self.set_matricies(red=__magic_name__ , green=__magic_name__ , blue=__magic_name__ , red_edge=__magic_name__ , nir=__magic_name__ ) UpperCAmelCase_ : List[str] = { '''ARVI2''': self.arvaa, '''CCCI''': self.ccci, '''CVI''': self.cvi, '''GLI''': self.gli, '''NDVI''': self.ndvi, '''BNDVI''': self.bndvi, '''redEdgeNDVI''': self.red_edge_ndvi, '''GNDVI''': self.gndvi, '''GBNDVI''': self.gbndvi, '''GRNDVI''': self.grndvi, '''RBNDVI''': self.rbndvi, '''PNDVI''': self.pndvi, '''ATSAVI''': self.atsavi, '''BWDRVI''': self.bwdrvi, '''CIgreen''': self.ci_green, '''CIrededge''': self.ci_rededge, '''CI''': self.ci, '''CTVI''': self.ctvi, '''GDVI''': self.gdvi, '''EVI''': self.evi, '''GEMI''': self.gemi, '''GOSAVI''': self.gosavi, '''GSAVI''': self.gsavi, '''Hue''': self.hue, '''IVI''': self.ivi, '''IPVI''': self.ipvi, '''I''': self.i, '''RVI''': self.rvi, '''MRVI''': self.mrvi, '''MSAVI''': self.m_savi, '''NormG''': self.norm_g, '''NormNIR''': self.norm_nir, '''NormR''': self.norm_r, '''NGRDI''': self.ngrdi, '''RI''': self.ri, '''S''': self.s, '''IF''': self._if, '''DVI''': self.dvi, '''TVI''': self.tvi, '''NDRE''': self.ndre, } try: return funcs[index]() except KeyError: print('''Index not in the list!''' ) return False def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red))) def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" return self.nir * (self.red / (self.green**2)) def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" return (self.nir - self.red) / (self.nir + self.red) def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return (self.nir - self.blue) / (self.nir + self.blue) def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" return (self.redEdge - self.red) / (self.redEdge + self.red) def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: 
"""simple docstring""" return (self.nir - self.green) / (self.nir + self.green) def UpperCAmelCase__ ( self : Tuple ) -> Dict: """simple docstring""" return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]: """simple docstring""" return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def UpperCAmelCase__ ( self : int ) -> List[Any]: """simple docstring""" return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any]=0.0_8 , __magic_name__ : Any=1.2_2 , __magic_name__ : Union[str, Any]=0.0_3 ) -> Optional[int]: """simple docstring""" return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def UpperCAmelCase__ ( self : List[Any] ) -> Dict: """simple docstring""" return (self.nir / self.green) - 1 def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" return (self.nir / self.redEdge) - 1 def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" return (self.red - self.blue) / self.red def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : Any = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2)) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" return self.nir - self.green def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def UpperCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[Any]=0.1_6 ) -> List[Any]: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green + y) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str]=0.5 ) -> Optional[int]: """simple docstring""" return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return np.arctan( ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : str=None , __magic_name__ : Optional[int]=None ) -> Any: """simple docstring""" return (self.nir - b) / (a * self.red) def UpperCAmelCase__ ( self : int ) -> List[Any]: """simple docstring""" return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" return (self.red + self.green + self.blue) / 3_0.5 def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" return self.nir / self.red def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return (self.rvi() - 1) / (self.rvi() + 1) def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple 
docstring""" return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" return self.green / (self.nir + self.red + self.green) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" return self.nir / (self.nir + self.red + self.green) def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" return self.red / (self.nir + self.red + self.green) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" return (self.green - self.red) / (self.green + self.red) def UpperCAmelCase__ ( self : str ) -> int: """simple docstring""" return (self.red - self.green) / (self.red + self.green) def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Tuple = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] ) UpperCAmelCase_ : Union[str, Any] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] ) return (max_value - min_value) / max_value def UpperCAmelCase__ ( self : List[str] ) -> Tuple: """simple docstring""" return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def UpperCAmelCase__ ( self : Dict ) -> List[Any]: """simple docstring""" return self.nir / self.red def UpperCAmelCase__ ( self : str ) -> Tuple: """simple docstring""" return (self.ndvi() + 0.5) ** (1 / 2) def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" return (self.nir - self.redEdge) / (self.nir + self.redEdge)
644
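The class above bundles dozens of spectral vegetation indices; NDVI, (NIR - RED) / (NIR + RED), is the canonical one. A standalone check of that formula; ndvi is a hypothetical name:

```python
import numpy as np

def ndvi(nir: np.ndarray, red: np.ndarray) -> np.ndarray:
    # Normalized Difference Vegetation Index: (NIR - RED) / (NIR + RED).
    return (nir - red) / (nir + red)

assert np.allclose(ndvi(np.array([0.8]), np.array([0.2])), [0.6])
```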
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]: UpperCAmelCase_ : int = [] if len(SCREAMING_SNAKE_CASE__ ) == 1: return [nums.copy()] for _ in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase_ : List[Any] = nums.pop(0 ) UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ ) for perm in permutations: perm.append(SCREAMING_SNAKE_CASE__ ) result.extend(SCREAMING_SNAKE_CASE__ ) nums.append(SCREAMING_SNAKE_CASE__ ) return result def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): if start == len(SCREAMING_SNAKE_CASE__ ) - 1: output.append(nums[:] ) else: for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start] backtrack(start + 1 ) UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack UpperCAmelCase_ : Optional[int] = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function snake_case_ : Tuple = permutea([1, 2, 3]) print(res) doctest.testmod()
644
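Both functions above enumerate permutations, one by pop/append recursion and one by in-place swaps. A self-contained check of the swap-based idea against itertools; permute here is a hypothetical standalone name:

```python
from itertools import permutations

def permute(nums):
    # Swap-based backtracking, equivalent to the second function above.
    out = []

    def backtrack(start):
        if start == len(nums) - 1:
            out.append(nums[:])
            return
        for i in range(start, len(nums)):
            nums[start], nums[i] = nums[i], nums[start]
            backtrack(start + 1)
            nums[start], nums[i] = nums[i], nums[start]  # undo the swap

    backtrack(0)
    return out

assert sorted(permute([1, 2, 3])) == sorted(map(list, permutations([1, 2, 3])))
```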
1
'''simple docstring''' import torch from diffusers import StableDiffusionPipeline snake_case_ : int = "path-to-your-trained-model" snake_case_ : Optional[Any] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda") snake_case_ : int = "A photo of sks dog in a bucket" snake_case_ : Optional[Any] = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("dog-bucket.png")
644
'''simple docstring''' class __a : def __init__( self : List[Any] , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : Optional[Any] = size UpperCAmelCase_ : Tuple = [0] * size UpperCAmelCase_ : Optional[Any] = [0] * size @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return index | (index + 1) @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return (index & (index + 1)) - 1 def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : int = value while index < self.size: UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1 if current_left_border == index: UpperCAmelCase_ : List[str] = value else: UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ ) def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int: """simple docstring""" right -= 1 # Because of right is exclusive UpperCAmelCase_ : List[str] = 0 while left <= right: UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ ) if left <= current_left: UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] ) UpperCAmelCase_ : Optional[Any] = current_left else: UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
644
1
'''simple docstring'''
import os
from typing import List, Optional, Union

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer


class __a (lowerCamelCase ):
    __a : Dict = ["image_processor", "tokenizer"]
    __a : Optional[Any] = "BlipImageProcessor"
    __a : int = "AutoTokenizer"

    def __init__( self : int , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Dict ) -> Tuple:
        """simple docstring"""
        super().__init__(__magic_name__ , __magic_name__ )
        # add QFormer tokenizer
        UpperCAmelCase_ : Union[str, Any] = qformer_tokenizer

    def __call__( self : Union[str, Any] , __magic_name__ : ImageInput = None , __magic_name__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = None , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : Any , ) -> BatchFeature:
        """simple docstring"""
        if images is None and text is None:
            raise ValueError('''You have to specify at least images or text.''' )
        UpperCAmelCase_ : Dict = BatchFeature()
        if text is not None:
            UpperCAmelCase_ : Optional[int] = self.tokenizer(
                text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
            encoding.update(__magic_name__ )
            UpperCAmelCase_ : List[str] = self.qformer_tokenizer(
                text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_token_type_ids=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
            UpperCAmelCase_ : int = qformer_text_encoding.pop('''input_ids''' )
            UpperCAmelCase_ : Union[str, Any] = qformer_text_encoding.pop('''attention_mask''' )
        if images is not None:
            UpperCAmelCase_ : Optional[Any] = self.image_processor(__magic_name__ , return_tensors=__magic_name__ )
            encoding.update(__magic_name__ )
        return encoding

    def UpperCAmelCase__ ( self : Tuple , *__magic_name__ : Optional[int] , **__magic_name__ : Optional[Any] ) -> Optional[int]:
        """simple docstring"""
        return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )

    def UpperCAmelCase__ ( self : List[str] , *__magic_name__ : Optional[int] , **__magic_name__ : Optional[Any] ) -> List[str]:
        """simple docstring"""
        return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        UpperCAmelCase_ : Any = self.tokenizer.model_input_names
        UpperCAmelCase_ : Tuple = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    def UpperCAmelCase__ ( self : Any , __magic_name__ : Dict , **__magic_name__ : Tuple ) -> int:
        """simple docstring"""
        if os.path.isfile(__magic_name__ ):
            raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
        os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
        UpperCAmelCase_ : List[str] = os.path.join(__magic_name__ , '''qformer_tokenizer''' )
        self.qformer_tokenizer.save_pretrained(__magic_name__ )
        return super().save_pretrained(__magic_name__ , **__magic_name__ )

    @classmethod
    def UpperCAmelCase__ ( cls : List[Any] , __magic_name__ : int , **__magic_name__ : List[str] ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(__magic_name__ , subfolder='''qformer_tokenizer''' )
        UpperCAmelCase_ : Optional[Any] = cls._get_arguments_from_pretrained(__magic_name__ , **__magic_name__ )
        args.append(__magic_name__ )
        return cls(*__magic_name__ )
644
'''simple docstring'''
import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


class __a :
    def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str:
        """simple docstring"""
        UpperCAmelCase_ : Any = parent
        UpperCAmelCase_ : Union[str, Any] = batch_size
        UpperCAmelCase_ : List[Any] = seq_length
        UpperCAmelCase_ : str = is_training
        UpperCAmelCase_ : Any = use_input_mask
        UpperCAmelCase_ : List[str] = use_token_type_ids
        UpperCAmelCase_ : Union[str, Any] = use_labels
        UpperCAmelCase_ : Dict = vocab_size
        UpperCAmelCase_ : Optional[Any] = hidden_size
        UpperCAmelCase_ : Dict = num_hidden_layers
        UpperCAmelCase_ : List[Any] = num_attention_heads
        UpperCAmelCase_ : Optional[int] = intermediate_size
        UpperCAmelCase_ : Union[str, Any] = hidden_act
        UpperCAmelCase_ : str = hidden_dropout_prob
        UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob
        UpperCAmelCase_ : Any = max_position_embeddings
        UpperCAmelCase_ : str = type_vocab_size
        UpperCAmelCase_ : Optional[Any] = type_sequence_label_size
        UpperCAmelCase_ : List[Any] = initializer_range
        UpperCAmelCase_ : int = num_labels
        UpperCAmelCase_ : Optional[int] = num_choices
        UpperCAmelCase_ : Tuple = scope

    def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
        """simple docstring"""
        UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase_ : Union[str, Any] = None
        if self.use_input_mask:
            UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase_ : str = None
        if self.use_token_type_ids:
            UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase_ : Tuple = None
        UpperCAmelCase_ : List[str] = None
        UpperCAmelCase_ : Union[str, Any] = None
        if self.use_labels:
            UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
        UpperCAmelCase_ : int = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def UpperCAmelCase__ ( self : Any ) -> List[Any]:
        """simple docstring"""
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )

    def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ )
        UpperCAmelCase_ : Optional[Any] = model(__magic_name__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int:
        """simple docstring"""
        UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int:
        """simple docstring"""
        UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        # create attention mask
        UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
        UpperCAmelCase_ : Any = self.seq_length // 2
        UpperCAmelCase_ : Tuple = 0
        # first forward pass
        UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1
        UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        UpperCAmelCase_ : str = random_other_next_tokens
        # append to next input_ids and attn_mask
        UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCAmelCase_ : int = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , )
        # get two different outputs
        UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
        UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
        # select random slice
        UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach()
        UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )

    def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int:
        """simple docstring"""
        UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval()
        UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ )
        # first forward pass
        UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ )
        UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state''']
        UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[
            '''last_hidden_state'''
        ]
        # select random slice
        UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ )
        model.to(__magic_name__ )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str:
        """simple docstring"""
        UpperCAmelCase_ : int = BioGptModel(__magic_name__ )
        UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )

    def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]:
        """simple docstring"""
        UpperCAmelCase_ : str = self.num_labels
        UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
        ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = config_and_inputs
        UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    __a : str = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    __a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
    __a : Union[str, Any] = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __a : List[str] = False

    def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : List[str] = BioGptModelTester(self )
        UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 )

    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        self.config_tester.run_common_tests()

    def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__magic_name__ )

    def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCAmelCase_ : str = type
            self.model_tester.create_and_check_model(*__magic_name__ )

    def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ )

    def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ )

    def UpperCAmelCase__ ( self : str ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ )

    def UpperCAmelCase__ ( self : Dict ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ )

    def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ )

    @slow
    def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(__magic_name__ )
        UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        UpperCAmelCase_ : Tuple = '''left'''
        # Define PAD Token = EOS Token = 50256
        UpperCAmelCase_ : List[Any] = tokenizer.eos_token
        UpperCAmelCase_ : List[Any] = model.config.eos_token_id
        # use different length sentences to test batching
        UpperCAmelCase_ : Tuple = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]
        UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ )
        UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ )
        UpperCAmelCase_ : Any = model.generate(
            input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , )
        UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
        UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ )
        UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ )
        UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings )
        UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )
        UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ )
        UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ )
        UpperCAmelCase_ : Optional[Any] = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(__magic_name__ , __magic_name__ )
        self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] )

    @slow
    def UpperCAmelCase__ ( self : str ) -> Optional[Any]:
        """simple docstring"""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ )
            self.assertIsNotNone(__magic_name__ )

    def UpperCAmelCase__ ( self : Tuple ) -> str:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : List[str] = 3
        UpperCAmelCase_ : Tuple = input_dict['''input_ids''']
        UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ )
        UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : List[Any] = 3
        UpperCAmelCase_ : Optional[int] = '''multi_label_classification'''
        UpperCAmelCase_ : int = input_dict['''input_ids''']
        UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ )
        UpperCAmelCase_ : Any = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )


@require_torch
class __a (unittest.TestCase ):
    @slow
    def UpperCAmelCase__ ( self : List[Any] ) -> str:
        """simple docstring"""
        UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
        UpperCAmelCase_ : str = model(__magic_name__ )[0]
        UpperCAmelCase_ : Optional[int] = 4_23_84
        UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , __magic_name__ )
        UpperCAmelCase_ : List[Any] = torch.tensor(
            [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) )

    @slow
    def UpperCAmelCase__ ( self : Any ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(__magic_name__ )
        torch.manual_seed(0 )
        UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ )
        UpperCAmelCase_ : Optional[int] = model.generate(
            **__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , )
        UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ )
        UpperCAmelCase_ : Optional[Any] = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(__magic_name__ , __magic_name__ )
644
1
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    snake_case_ : int = None

snake_case_ : Dict = logging.get_logger(__name__)

snake_case_ : Optional[int] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

snake_case_ : Any = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

snake_case_ : Tuple = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

snake_case_ : Optional[int] = "▁"

# Segments (not really needed)
snake_case_ : Dict = 0
snake_case_ : List[str] = 1
snake_case_ : List[Any] = 2
snake_case_ : Any = 3
snake_case_ : Optional[int] = 4


class __a (lowerCamelCase ):
    __a : Union[str, Any] = VOCAB_FILES_NAMES
    __a : Any = PRETRAINED_VOCAB_FILES_MAP
    __a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a : Union[str, Any] = "left"
    __a : List[Any] = XLNetTokenizer

    def __init__( self : List[str] , __magic_name__ : List[str]=None , __magic_name__ : str=None , __magic_name__ : Optional[int]=False , __magic_name__ : Tuple=True , __magic_name__ : List[Any]=False , __magic_name__ : Any="<s>" , __magic_name__ : List[str]="</s>" , __magic_name__ : List[str]="<unk>" , __magic_name__ : Optional[Any]="<sep>" , __magic_name__ : Optional[Any]="<pad>" , __magic_name__ : Optional[int]="<cls>" , __magic_name__ : Tuple="<mask>" , __magic_name__ : str=["<eop>", "<eod>"] , **__magic_name__ : int , ) -> Optional[Any]:
        """simple docstring"""
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCAmelCase_ : Union[str, Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
        super().__init__(
            vocab_file=__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , remove_space=__magic_name__ , keep_accents=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
        UpperCAmelCase_ : Union[str, Any] = 3
        UpperCAmelCase_ : Any = do_lower_case
        UpperCAmelCase_ : Union[str, Any] = remove_space
        UpperCAmelCase_ : List[Any] = keep_accents
        UpperCAmelCase_ : List[str] = vocab_file
        UpperCAmelCase_ : Tuple = False if not self.vocab_file else True

    def UpperCAmelCase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = [self.sep_token_id]
        UpperCAmelCase_ : int = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls

    def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = [self.sep_token_id]
        UpperCAmelCase_ : int = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id

    def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(__magic_name__ ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase_ : Union[str, Any] = os.path.join(
            __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
            copyfile(self.vocab_file , __magic_name__ )
        return (out_vocab_file,)
644
'''simple docstring'''
import json
import os
import unittest

from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
    VOCAB_FILES_NAMES,
    BlenderbotSmallTokenizer,
)

from ...test_tokenization_common import TokenizerTesterMixin


class __a (lowerCamelCase , unittest.TestCase ):
    __a : List[str] = BlenderbotSmallTokenizer
    __a : List[Any] = False

    def UpperCAmelCase__ ( self : str ) -> str:
        """simple docstring"""
        super().setUp()
        UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
        UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
        UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
        UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
        UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(__magic_name__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(__magic_name__ ) )

    def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple:
        """simple docstring"""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )

    def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : str = '''adapt act apte'''
        UpperCAmelCase_ : Tuple = '''adapt act apte'''
        return input_text, output_text

    def UpperCAmelCase__ ( self : str ) -> Any:
        """simple docstring"""
        UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        UpperCAmelCase_ : List[Any] = '''adapt act apte'''
        UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te''']
        UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ )
        self.assertListEqual(__magic_name__ , __magic_name__ )
        UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )

    def UpperCAmelCase__ ( self : int ) -> List[str]:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        assert tok('''sam''' ).input_ids == [13_84]
        UpperCAmelCase_ : Optional[int] = '''I am a small frog.'''
        UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids''']
        UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
        """simple docstring"""
        UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
        UpperCAmelCase_ : List[Any] = '''I am a small frog .'''
        UpperCAmelCase_ : Any = '''.'''
        UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids''']
        UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids''']
        assert encoded[-1] == encoded_dot[0]
644
1
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[list[float]] ) -> list[list[float]]:
    UpperCAmelCase_ : list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(SCREAMING_SNAKE_CASE__ ):
            if len(SCREAMING_SNAKE_CASE__ ) < i + 1:
                data_lists.append([] )
            data_lists[i].append(float(SCREAMING_SNAKE_CASE__ ) )
    return data_lists


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[list[float]], SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[float]]:
    UpperCAmelCase_ : list[list[float]] = []
    for dlist, weight in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
        UpperCAmelCase_ : Union[str, Any] = min(SCREAMING_SNAKE_CASE__ )
        UpperCAmelCase_ : Dict = max(SCREAMING_SNAKE_CASE__ )
        UpperCAmelCase_ : list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)) )
                except ZeroDivisionError:
                    score.append(1 )
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind) )
                except ZeroDivisionError:
                    score.append(0 )
        # weight not 0 or 1
        else:
            UpperCAmelCase_ : Union[str, Any] = F"""Invalid weight of {weight:f} provided"""
            raise ValueError(SCREAMING_SNAKE_CASE__ )
        score_lists.append(SCREAMING_SNAKE_CASE__ )
    return score_lists


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[list[float]] ) -> list[float]:
    UpperCAmelCase_ : list[float] = [0 for i in range(len(score_lists[0] ) )]
    for slist in score_lists:
        for j, ele in enumerate(SCREAMING_SNAKE_CASE__ ):
            UpperCAmelCase_ : Optional[Any] = final_scores[j] + ele
    return final_scores


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[list[float]], SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[float]]:
    UpperCAmelCase_ : Union[str, Any] = get_data(SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : int = calculate_each_score(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : Optional[Any] = generate_final_scores(SCREAMING_SNAKE_CASE__ )
    # append scores to source data
    for i, ele in enumerate(SCREAMING_SNAKE_CASE__ ):
        source_data[i].append(SCREAMING_SNAKE_CASE__ )
    return source_data
644
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = get_activation('''swish''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[int] = get_activation('''mish''' ) self.assertIsInstance(__magic_name__ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = get_activation('''gelu''' ) self.assertIsInstance(__magic_name__ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
644
1
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


snake_case_ : Any = logging.get_logger(__name__)

snake_case_ : Optional[int] = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}


class __a (lowerCamelCase ):
    __a : str = "nllb-moe"
    __a : List[Any] = ["past_key_values"]
    __a : int = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self : Any , __magic_name__ : Dict=12_81_12 , __magic_name__ : List[str]=10_24 , __magic_name__ : Optional[Any]=12 , __magic_name__ : str=40_96 , __magic_name__ : List[str]=16 , __magic_name__ : Optional[int]=12 , __magic_name__ : str=40_96 , __magic_name__ : Optional[Any]=16 , __magic_name__ : Optional[Any]=0.0_5 , __magic_name__ : Optional[int]=0.0_5 , __magic_name__ : Optional[int]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Optional[int]="relu" , __magic_name__ : Dict=10_24 , __magic_name__ : Tuple=0.1 , __magic_name__ : Optional[Any]=0.1 , __magic_name__ : Optional[int]=0.0 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Dict=2 , __magic_name__ : Any=True , __magic_name__ : List[Any]=False , __magic_name__ : Dict="float32" , __magic_name__ : str=False , __magic_name__ : int=1_28 , __magic_name__ : Optional[int]=64 , __magic_name__ : str=4 , __magic_name__ : Optional[Any]=4 , __magic_name__ : List[str]=0.0_0_1 , __magic_name__ : Dict=0.0_0_1 , __magic_name__ : Tuple="all" , __magic_name__ : Any=False , __magic_name__ : Any=False , __magic_name__ : Union[str, Any]=1.0 , __magic_name__ : List[str]=0.2 , __magic_name__ : str=1 , __magic_name__ : int=0 , __magic_name__ : int=2 , __magic_name__ : Dict=False , **__magic_name__ : int , ) -> int:
        """simple docstring"""
        UpperCAmelCase_ : Dict = vocab_size
        UpperCAmelCase_ : int = max_position_embeddings
        UpperCAmelCase_ : int = d_model
        UpperCAmelCase_ : Dict = encoder_ffn_dim
        UpperCAmelCase_ : str = encoder_layers
        UpperCAmelCase_ : Union[str, Any] = encoder_attention_heads
        UpperCAmelCase_ : int = decoder_ffn_dim
        UpperCAmelCase_ : str = decoder_layers
        UpperCAmelCase_ : Dict = decoder_attention_heads
        UpperCAmelCase_ : Any = dropout
        UpperCAmelCase_ : Tuple = attention_dropout
        UpperCAmelCase_ : Dict = activation_dropout
        UpperCAmelCase_ : str = activation_function
        UpperCAmelCase_ : Union[str, Any] = init_std
        UpperCAmelCase_ : Union[str, Any] = encoder_layerdrop
        UpperCAmelCase_ : List[Any] = decoder_layerdrop
        UpperCAmelCase_ : str = use_cache
        UpperCAmelCase_ : int = encoder_layers
        UpperCAmelCase_ : Optional[Any] = scale_embedding  # scale factor will be sqrt(d_model) if True
        UpperCAmelCase_ : List[str] = router_z_loss_coef
        UpperCAmelCase_ : Optional[Any] = router_aux_loss_coef
        UpperCAmelCase_ : str = decoder_sparse_step
        UpperCAmelCase_ : int = encoder_sparse_step
        UpperCAmelCase_ : int = num_experts
        UpperCAmelCase_ : Optional[int] = expert_capacity
        UpperCAmelCase_ : Union[str, Any] = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        UpperCAmelCase_ : List[str] = router_dtype
        UpperCAmelCase_ : List[str] = router_ignore_padding_tokens
        UpperCAmelCase_ : Dict = batch_prioritized_routing
        UpperCAmelCase_ : Optional[int] = second_expert_policy
        UpperCAmelCase_ : int = normalize_router_prob_before_dropping
        UpperCAmelCase_ : int = moe_eval_capacity_token_fraction
        UpperCAmelCase_ : Union[str, Any] = moe_token_dropout
        UpperCAmelCase_ : Optional[int] = output_router_logits
        super().__init__(
            pad_token_id=__magic_name__ , bos_token_id=__magic_name__ , eos_token_id=__magic_name__ , is_encoder_decoder=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
644
'''simple docstring'''
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


snake_case_ : Union[str, Any] = logging.get_logger(__name__)


class __a (lowerCamelCase ):
    __a : Tuple = ["pixel_values"]

    def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None:
        """simple docstring"""
        UpperCAmelCase_ : int = do_resize
        UpperCAmelCase_ : Tuple = do_rescale
        UpperCAmelCase_ : List[Any] = size_divisor
        UpperCAmelCase_ : Any = resample
        super().__init__(**__magic_name__ )

    def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ )
        # Rounds the height and width down to the closest multiple of size_divisor
        UpperCAmelCase_ : Dict = height // size_divisor * size_divisor
        UpperCAmelCase_ : Dict = width // size_divisor * size_divisor
        UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )
        return image

    def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray:
        """simple docstring"""
        return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ )

    def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature:
        """simple docstring"""
        UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor
        UpperCAmelCase_ : Dict = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError('''size_divisor is required for resizing''' )
        UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ )
        if not valid_images(__magic_name__ ):
            raise ValueError('''Invalid image(s)''' )
        # All transformations expect numpy arrays.
        UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images]
        if do_resize:
            UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images]
        if do_rescale:
            UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images]
        UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images]
        UpperCAmelCase_ : int = {'''pixel_values''': images}
        return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
644
1
'''simple docstring'''
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

snake_case_ : Optional[Any] = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
snake_case_ : Union[str, Any] = typing.Union[np.floataa, int, float]  # noqa: UP007


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Vector, SCREAMING_SNAKE_CASE__ : Vector ) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE__ ) - np.asarray(SCREAMING_SNAKE_CASE__ )) ** 2 ) )


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Vector, SCREAMING_SNAKE_CASE__ : Vector ) -> VectorOut:
    return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) ) ** (1 / 2)


if __name__ == "__main__":

    def lowerCamelCase_ ( ) -> None:
        from timeit import timeit

        print('''Without Numpy''' )
        print(
            timeit(
                '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''',
                number=10000,
                globals=globals(),
            ) )
        print('''With Numpy''' )
        print(
            timeit(
                '''euclidean_distance([1, 2, 3], [4, 5, 6])''',
                number=10000,
                globals=globals(),
            ) )

    benchmark()
644
'''simple docstring'''
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 10, SCREAMING_SNAKE_CASE__ : int = 22 ) -> int:
    UpperCAmelCase_ : Optional[int] = range(1, SCREAMING_SNAKE_CASE__ )
    UpperCAmelCase_ : List[Any] = range(1, SCREAMING_SNAKE_CASE__ )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )


if __name__ == "__main__":
    print(f'''{solution(10, 22) = }''')
644
1
'''simple docstring'''
import requests


def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str ) -> None:
    UpperCAmelCase_ : List[str] = {'''Content-Type''': '''application/json'''}
    UpperCAmelCase_ : Optional[Any] = requests.post(SCREAMING_SNAKE_CASE__, json={'''text''': message_body}, headers=SCREAMING_SNAKE_CASE__ )
    if response.status_code != 200:
        UpperCAmelCase_ : str = (
            '''Request to slack returned an error '''
            F"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(SCREAMING_SNAKE_CASE__ )


if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
644
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class __a (lowerCamelCase ):
    __a : int = "dandelin/vilt-b32-finetuned-vqa"
    __a : Any = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    __a : Any = "image_qa"
    __a : str = AutoProcessor
    __a : Any = AutoModelForVisualQuestionAnswering
    __a : List[Any] = ["image", "text"]
    __a : int = ["text"]

    def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ['''vision'''] )
        super().__init__(*__magic_name__ , **__magic_name__ )

    def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple:
        """simple docstring"""
        return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' )

    def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]:
        """simple docstring"""
        with torch.no_grad():
            return self.model(**__magic_name__ ).logits

    def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]:
        """simple docstring"""
        UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item()
        return self.model.config.idalabel[idx]
644
1
'''simple docstring'''
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class __a :
    def __init__( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str]=13 , __magic_name__ : Optional[Any]=30 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=True , __magic_name__ : Optional[int]=True , __magic_name__ : Tuple=32 , __magic_name__ : str=5 , __magic_name__ : Optional[Any]=4 , __magic_name__ : List[str]=37 , __magic_name__ : List[Any]="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : int=0.1 , __magic_name__ : Union[str, Any]=10 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Any=None , ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = parent
        UpperCAmelCase_ : int = batch_size
        UpperCAmelCase_ : Any = image_size
        UpperCAmelCase_ : int = patch_size
        UpperCAmelCase_ : Dict = num_channels
        UpperCAmelCase_ : Union[str, Any] = is_training
        UpperCAmelCase_ : str = use_labels
        UpperCAmelCase_ : List[Any] = hidden_size
        UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
        UpperCAmelCase_ : Union[str, Any] = num_attention_heads
        UpperCAmelCase_ : List[Any] = intermediate_size
        UpperCAmelCase_ : Any = hidden_act
        UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
        UpperCAmelCase_ : Any = attention_probs_dropout_prob
        UpperCAmelCase_ : str = type_sequence_label_size
        UpperCAmelCase_ : List[Any] = initializer_range
        UpperCAmelCase_ : Any = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase_ : List[Any] = (image_size // patch_size) ** 2
        UpperCAmelCase_ : List[Any] = num_patches + 1

    def UpperCAmelCase__ ( self : Any ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ : Optional[int] = None
        if self.use_labels:
            UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        UpperCAmelCase_ : Optional[Any] = self.get_config()
        return config, pixel_values, labels

    def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
        """simple docstring"""
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def UpperCAmelCase__ ( self : str , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : Union[str, Any] = ViTMSNModel(config=__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        UpperCAmelCase_ : List[str] = model(__magic_name__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def UpperCAmelCase__ ( self : int , __magic_name__ : Any , __magic_name__ : str , __magic_name__ : str ) -> str:
        """simple docstring"""
        UpperCAmelCase_ : List[Any] = self.type_sequence_label_size
        UpperCAmelCase_ : str = ViTMSNForImageClassification(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        UpperCAmelCase_ : int = model(__magic_name__ , labels=__magic_name__ )
        print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
        print('''Labels: {labels}''' )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCAmelCase_ : Optional[Any] = 1
        UpperCAmelCase_ : List[Any] = ViTMSNForImageClassification(__magic_name__ )
        model.to(__magic_name__ )
        model.eval()
        UpperCAmelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ : Tuple = model(__magic_name__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def UpperCAmelCase__ ( self : Dict ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = self.prepare_config_and_inputs()
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = config_and_inputs
        UpperCAmelCase_ : int = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    __a : str = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    __a : str = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    __a : Union[str, Any] = False
    __a : Optional[int] = False
    __a : Any = False
    __a : str = False

    def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
        """simple docstring"""
        UpperCAmelCase_ : int = ViTMSNModelTester(self )
        UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )

    def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
        """simple docstring"""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
    def UpperCAmelCase__ ( self : str ) -> List[str]:
        """simple docstring"""
        pass

    def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : int = model_class(__magic_name__ )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            UpperCAmelCase_ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )

    def UpperCAmelCase__ ( self : int ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Optional[int] = model_class(__magic_name__ )
            UpperCAmelCase_ : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : Optional[int] = [*signature.parameters.keys()]
            UpperCAmelCase_ : Tuple = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __magic_name__ )

    def UpperCAmelCase__ ( self : int ) -> Tuple:
        """simple docstring"""
        UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__magic_name__ )

    def UpperCAmelCase__ ( self : int ) -> List[Any]:
        """simple docstring"""
        UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__magic_name__ )

    @slow
    def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple:
        """simple docstring"""
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Dict = ViTMSNModel.from_pretrained(__magic_name__ )
            self.assertIsNotNone(__magic_name__ )


def lowerCamelCase_ ( ) -> List[str]:
    UpperCAmelCase_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


@require_torch
@require_vision
class __a (unittest.TestCase ):
    @cached_property
    def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
        """simple docstring"""
        return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None

    @slow
    def UpperCAmelCase__ ( self : str ) -> Union[str, Any]:
        """simple docstring"""
        torch.manual_seed(2 )
        UpperCAmelCase_ : List[str] = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(__magic_name__ )
        UpperCAmelCase_ : Dict = self.default_image_processor
        UpperCAmelCase_ : Union[str, Any] = prepare_img()
        UpperCAmelCase_ : Optional[Any] = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ )
        # forward pass
        with torch.no_grad():
            UpperCAmelCase_ : Optional[int] = model(**__magic_name__ )
        # verify the logits
        UpperCAmelCase_ : List[str] = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , __magic_name__ )
        UpperCAmelCase_ : str = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(__magic_name__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
644
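# A minimal sketch of the greyscale check performed by the tester above: switch
# the channel count to 1, feed a (batch, 1, H, W) tensor, and confirm the logits
# still come out as (batch, num_labels). ToyClassifier is a hypothetical
# stand-in, not the ViTMSN classes under test.
import torch
from torch import nn

class ToyClassifier(nn.Module):
    def __init__(self, num_channels: int, num_labels: int) -> None:
        super().__init__()
        self.features = nn.Conv2d(num_channels, 8, kernel_size=3, padding=1)
        self.head = nn.Linear(8, num_labels)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        hidden = self.features(pixel_values).mean(dim=(2, 3))  # global average pool
        return self.head(hidden)

batch_size, image_size, num_labels = 2, 30, 10
model = ToyClassifier(num_channels=1, num_labels=num_labels).eval()
pixel_values = torch.rand(batch_size, 1, image_size, image_size)  # greyscale input
logits = model(pixel_values)
assert logits.shape == (batch_size, num_labels)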
'''simple docstring''' from collections.abc import Iterable from typing import Any class __a : def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = value UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def __repr__( self : List[str] ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class __a : def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = root def __str__( self : Any ) -> str: """simple docstring""" return str(self.root ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None: """simple docstring""" if new_children is not None: # reset its kids UpperCAmelCase_ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(__magic_name__ ): # If it is the right children UpperCAmelCase_ : Optional[Any] = new_children else: UpperCAmelCase_ : Optional[int] = new_children else: UpperCAmelCase_ : List[str] = new_children def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.root is None def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None: """simple docstring""" UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase_ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase_ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase_ : List[Any] = parent_node.left else: if parent_node.right is None: UpperCAmelCase_ : List[Any] = new_node break else: UpperCAmelCase_ : Union[str, Any] = parent_node.right UpperCAmelCase_ : Union[str, Any] = parent_node def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None: """simple docstring""" for value in values: self.__insert(__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None: """simple docstring""" if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase_ : str = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right return node def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: if self.root is None: return None UpperCAmelCase_ : Dict = self.root if not self.empty(): while node.right is not None: UpperCAmelCase_ : Any = node.right return node def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: UpperCAmelCase_ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase_ : Union[str, Any] = self.root while node.left is not None: UpperCAmelCase_ : Dict = node.left return node def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__magic_name__ , __magic_name__ ) elif node.left is None: # Has only right children self.__reassign_nodes(__magic_name__ , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__magic_name__ , node.left ) else: UpperCAmelCase_ : List[str] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase_ : Optional[int] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None: """simple docstring""" if node: self.inorder(__magic_name__ , node.left ) arr.append(node.value ) self.inorder(__magic_name__ , node.right ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int: """simple docstring""" UpperCAmelCase_ : list[int] = [] self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal return arr[k - 1] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]: UpperCAmelCase_ : Any = [] if curr_node is not None: UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def lowerCamelCase_ ( ) -> None: UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase_ : Tuple = BinarySearchTree() for i in testlist: t.insert(SCREAMING_SNAKE_CASE__ ) # Prints all the elements of the list in order traversal print(SCREAMING_SNAKE_CASE__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''', t.get_max().value ) # type: ignore 
print('''Min Value: ''', t.get_min().value ) # type: ignore for i in testlist: t.remove(SCREAMING_SNAKE_CASE__ ) print(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
644
1
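# A readable sketch of the insert/search logic in the binary search tree above,
# rewritten with conventional names (the corpus sample keeps its obfuscated
# identifiers); values smaller than a node go left, everything else goes right.
from __future__ import annotations

class BSTNode:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: BSTNode | None = None
        self.right: BSTNode | None = None

def insert(root: BSTNode | None, value: int) -> BSTNode:
    if root is None:
        return BSTNode(value)
    if value < root.value:
        root.left = insert(root.left, value)
    else:
        root.right = insert(root.right, value)
    return root

def search(root: BSTNode | None, value: int) -> BSTNode | None:
    while root is not None and root.value != value:
        root = root.left if value < root.value else root.right
    return root

root = None
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):
    root = insert(root, v)
assert search(root, 6) is not None and search(root, -1) is None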
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str: """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : List[str] = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : List[str] = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : int = type_vocab_size UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : List[str] = scope UpperCAmelCase_ : List[str] = range_bbox def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase_ : List[str] = bbox[i, j, 3] UpperCAmelCase_ : Dict = bbox[i, j, 1] UpperCAmelCase_ : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase_ : List[str] = bbox[i, j, 2] UpperCAmelCase_ : Tuple = bbox[i, j, 0] UpperCAmelCase_ : Union[str, Any] = t UpperCAmelCase_ : int = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) 
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Tuple = { '''input_ids''': input_ids, 
'''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Tuple = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __a : Any = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __a : Union[str, Any] = False __a : int = False def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str: """simple docstring""" return True def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = LiltModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : Tuple = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_torch @slow class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ ) UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ ) UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ ) UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] ) UpperCAmelCase_ : List[str] = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , ) self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
644
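# A small sketch of the bbox "legalisation" step the Lilt tester above performs
# in a double loop: random (x0, y0, x1, y1) boxes are repaired so x0 <= x1 and
# y0 <= y1, here vectorised with a sort over each coordinate pair.
import torch

bbox = torch.randint(0, 1000, (2, 7, 4))   # (batch, seq_len, 4) random boxes
x = bbox[..., [0, 2]].sort(dim=-1).values  # x-pairs in ascending order
y = bbox[..., [1, 3]].sort(dim=-1).values  # y-pairs in ascending order
legal = torch.stack([x[..., 0], y[..., 0], x[..., 1], y[..., 1]], dim=-1)
assert (legal[..., 2] >= legal[..., 0]).all() and (legal[..., 3] >= legal[..., 1]).all()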
'''simple docstring''' import sys import turtle def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float] ) -> tuple[float, float]: return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : tuple[float, float], SCREAMING_SNAKE_CASE__ : int, ) -> None: my_pen.up() my_pen.goto(vertexa[0], vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0], vertexa[1] ) my_pen.goto(vertexa[0], vertexa[1] ) my_pen.goto(vertexa[0], vertexa[1] ) if depth == 0: return triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 ) triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 ) triangle(SCREAMING_SNAKE_CASE__, get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), get_mid(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( "Correct format for using this script: " "python fractals.py <int:depth_for_fractal>" ) snake_case_ : Any = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("red") snake_case_ : Tuple = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
644
1
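# A turtle-free sketch of the Sierpinski recursion above: every call splits a
# triangle via its edge midpoints into three sub-triangles, so a drawing of
# depth d ends in 3**d smallest triangles.
def get_mid(pa: tuple[float, float], pb: tuple[float, float]) -> tuple[float, float]:
    return ((pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2)

def count_leaf_triangles(depth: int) -> int:
    return 1 if depth == 0 else 3 * count_leaf_triangles(depth - 1)

assert get_mid((-175, -125), (175, -125)) == (0.0, -125.0)
assert count_leaf_triangles(3) == 27  # 3**3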
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case_ : Optional[int] = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Optional[Any] = ["PLBartTokenizer"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Optional[int] = [ "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST", "PLBartForCausalLM", "PLBartForConditionalGeneration", "PLBartForSequenceClassification", "PLBartModel", "PLBartPreTrainedModel", ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
644
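# The PLBart package above routes its heavy imports through a lazy-module
# helper so that importing the package stays cheap. A generic sketch of the
# same idea with PEP 562 module-level __getattr__ (a hypothetical helper, not
# the library's actual _LazyModule class): saved as a module, attribute access
# such as `module.sqrt` triggers the underlying import only on first use.
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}

def __getattr__(name: str):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")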
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case_ : List[str] = False class __a (unittest.TestCase ): pass @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = generator.manual_seed(0 ) UpperCAmelCase_ : Dict = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077''' UpperCAmelCase_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe.dual_guided( prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger ''' UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pipe.text_to_image( prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 
0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
644
1
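# The slow test above asserts that saving and reloading a pipeline leaves its
# forward pass identical for a fixed seed. A minimal sketch of that round-trip
# pattern with a plain torch module (a toy stand-in, not the diffusers
# save_pretrained/from_pretrained API):
import tempfile
import torch

model = torch.nn.Linear(4, 4).eval()
x = torch.randn(1, 4)
with tempfile.TemporaryDirectory() as tmpdir:
    torch.save(model.state_dict(), f"{tmpdir}/weights.pt")
    reloaded = torch.nn.Linear(4, 4).eval()
    reloaded.load_state_dict(torch.load(f"{tmpdir}/weights.pt"))
with torch.no_grad():
    assert torch.allclose(model(x), reloaded(x))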
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = get_activation('''swish''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[int] = get_activation('''mish''' ) self.assertIsInstance(__magic_name__ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = get_activation('''gelu''' ) self.assertIsInstance(__magic_name__ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
644
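# A quick numerical sanity sketch of the identities the activation tests above
# rely on: silu(x) = x * sigmoid(x), hence silu(0) == 0 exactly and
# silu(20) is approximately 20 because sigmoid saturates at 1 for large inputs.
import torch

x = torch.tensor([-100.0, -1.0, 0.0, 20.0])
silu = torch.nn.SiLU()
assert torch.allclose(silu(x), x * torch.sigmoid(x))
assert silu(x)[2].item() == 0.0
assert abs(silu(x)[3].item() - 20.0) < 1e-4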
'''simple docstring''' snake_case_ : int = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
644
1
'''simple docstring''' import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def lowerCamelCase_ ( ) -> int: UpperCAmelCase_ : Dict = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png''' UpperCAmelCase_ : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE__, stream=SCREAMING_SNAKE_CASE__ ).raw ).convert('''RGB''' ) return image def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]: UpperCAmelCase_ : Any = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') ) # fmt: on return rename_keys def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : Union[str, Any], SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any: UpperCAmelCase_ : int = dct.pop(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Union[str, Any] = val def 
lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Tuple ) -> List[str]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases UpperCAmelCase_ : Optional[int] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) UpperCAmelCase_ : Any = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict UpperCAmelCase_ : Dict = torch.cat((q_bias, torch.zeros_like(SCREAMING_SNAKE_CASE__, requires_grad=SCREAMING_SNAKE_CASE__ ), v_bias) ) UpperCAmelCase_ : Union[str, Any] = qkv_bias def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str: UpperCAmelCase_ : Optional[int] = 364 if '''coco''' in model_name else 224 UpperCAmelCase_ : List[str] = BlipaVisionConfig(image_size=SCREAMING_SNAKE_CASE__ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: UpperCAmelCase_ : Dict = OPTConfig.from_pretrained('''facebook/opt-2.7b''', eos_token_id=SCREAMING_SNAKE_CASE__ ).to_dict() elif "opt-6.7b" in model_name: UpperCAmelCase_ : int = OPTConfig.from_pretrained('''facebook/opt-6.7b''', eos_token_id=SCREAMING_SNAKE_CASE__ ).to_dict() elif "t5-xl" in model_name: UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: UpperCAmelCase_ : str = TaConfig.from_pretrained('''google/flan-t5-xxl''', dense_act_fn='''gelu''', bos_token_id=1 ).to_dict() UpperCAmelCase_ : str = BlipaConfig(vision_config=SCREAMING_SNAKE_CASE__, text_config=SCREAMING_SNAKE_CASE__ ) return config, image_size @torch.no_grad() def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : List[str]=None, SCREAMING_SNAKE_CASE__ : Tuple=False ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = ( AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' ) if '''opt''' in model_name else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' ) ) UpperCAmelCase_ : Union[str, Any] = tokenizer('''\n''', add_special_tokens=SCREAMING_SNAKE_CASE__ ).input_ids[0] UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_blipa_config(SCREAMING_SNAKE_CASE__, eos_token_id=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[int] = BlipaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() UpperCAmelCase_ : Optional[int] = { '''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''), '''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''), '''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''), '''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''), '''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''), '''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''), '''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''), } UpperCAmelCase_ , UpperCAmelCase_ : List[str] = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) UpperCAmelCase_ : List[Any] = '''cuda''' if torch.cuda.is_available() else '''cpu''' UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = load_model_and_preprocess( name=SCREAMING_SNAKE_CASE__, model_type=SCREAMING_SNAKE_CASE__, is_eval=SCREAMING_SNAKE_CASE__, device=SCREAMING_SNAKE_CASE__ ) original_model.eval() print('''Done!''' ) # update state dict keys UpperCAmelCase_ : Union[str, Any] = 
original_model.state_dict() UpperCAmelCase_ : Dict = create_rename_keys(SCREAMING_SNAKE_CASE__ ) for src, dest in rename_keys: rename_key(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): UpperCAmelCase_ : int = state_dict.pop(SCREAMING_SNAKE_CASE__ ) if key.startswith('''Qformer.bert''' ): UpperCAmelCase_ : Optional[int] = key.replace('''Qformer.bert''', '''qformer''' ) if "attention.self" in key: UpperCAmelCase_ : Any = key.replace('''self''', '''attention''' ) if "opt_proj" in key: UpperCAmelCase_ : Tuple = key.replace('''opt_proj''', '''language_projection''' ) if "t5_proj" in key: UpperCAmelCase_ : int = key.replace('''t5_proj''', '''language_projection''' ) if key.startswith('''opt''' ): UpperCAmelCase_ : int = key.replace('''opt''', '''language''' ) if key.startswith('''t5''' ): UpperCAmelCase_ : Union[str, Any] = key.replace('''t5''', '''language''' ) UpperCAmelCase_ : str = val # read in qv biases read_in_q_v_bias(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = hf_model.load_state_dict(SCREAMING_SNAKE_CASE__, strict=SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] UpperCAmelCase_ : Optional[Any] = load_demo_image() UpperCAmelCase_ : Optional[int] = vis_processors['''eval'''](SCREAMING_SNAKE_CASE__ ).unsqueeze(0 ).to(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = tokenizer(['''\n'''], return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE__ ) # create processor UpperCAmelCase_ : Any = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size}, image_mean=SCREAMING_SNAKE_CASE__, image_std=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=SCREAMING_SNAKE_CASE__, tokenizer=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Dict = processor(images=SCREAMING_SNAKE_CASE__, return_tensors='''pt''' ).pixel_values.to(SCREAMING_SNAKE_CASE__ ) # make sure processor creates exact same pixel values assert torch.allclose(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) original_model.to(SCREAMING_SNAKE_CASE__ ) hf_model.to(SCREAMING_SNAKE_CASE__ ) with torch.no_grad(): if "opt" in model_name: UpperCAmelCase_ : List[Any] = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits UpperCAmelCase_ : Any = hf_model(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ).logits else: UpperCAmelCase_ : List[Any] = original_model( {'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits UpperCAmelCase_ : Dict = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100 ) UpperCAmelCase_ : List[str] = hf_model(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, labels=SCREAMING_SNAKE_CASE__ ).logits assert original_logits.shape == logits.shape print('''First values of original logits:''', original_logits[0, :3, :3] ) print('''First values of HF logits:''', logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": UpperCAmelCase_ : Any = torch.tensor( [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]], device=SCREAMING_SNAKE_CASE__ ) assert torch.allclose(logits[0, :3, :3], SCREAMING_SNAKE_CASE__, atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": UpperCAmelCase_ : Optional[Any] = torch.tensor( [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]], device=SCREAMING_SNAKE_CASE__ ) else: # cast 
to same type UpperCAmelCase_ : List[str] = logits.dtype assert torch.allclose(original_logits.to(SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__, atol=1E-2 ) print('''Looks ok!''' ) print('''Generating a caption...''' ) UpperCAmelCase_ : Tuple = '''''' UpperCAmelCase_ : Any = tokenizer(SCREAMING_SNAKE_CASE__, return_tensors='''pt''' ).input_ids.to(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} ) UpperCAmelCase_ : str = hf_model.generate( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, do_sample=SCREAMING_SNAKE_CASE__, num_beams=5, max_length=30, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1, ) print('''Original generation:''', SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = input_ids.shape[1] UpperCAmelCase_ : Tuple = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Union[str, Any] = [text.strip() for text in output_text] print('''HF generation:''', SCREAMING_SNAKE_CASE__ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ ) if push_to_hub: processor.push_to_hub(F"""nielsr/{model_name}""" ) hf_model.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": snake_case_ : int = argparse.ArgumentParser() snake_case_ : int = [ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) snake_case_ : List[str] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
644
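# A compact sketch of the two state-dict surgeries the conversion script above
# performs: renaming a key by popping and re-inserting it, and packing the
# separate q/v biases into a single qkv bias with zeros for the bias-free k
# projection (toy key names and sizes).
import torch

state_dict = {
    "visual_encoder.blocks.0.attn.q_bias": torch.ones(4),
    "visual_encoder.blocks.0.attn.v_bias": torch.full((4,), 2.0),
}
q_bias = state_dict.pop("visual_encoder.blocks.0.attn.q_bias")
v_bias = state_dict.pop("visual_encoder.blocks.0.attn.v_bias")
state_dict["vision_model.encoder.layers.0.self_attn.qkv.bias"] = torch.cat(
    (q_bias, torch.zeros_like(q_bias), v_bias)
)
assert state_dict["vision_model.encoder.layers.0.self_attn.qkv.bias"].shape == (12,)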
'''simple docstring''' import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __a (unittest.TestCase ): @property def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet UpperCAmelCase_ : Dict = KarrasVeScheduler() UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0] UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256''' UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = KarrasVeScheduler() UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
644
1
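# The slow pipeline tests above pin outputs down by comparing a 3x3 corner
# slice of the generated image to hard-coded reference values. A sketch of that
# assertion pattern with dummy data:
import numpy as np

image = np.zeros((1, 256, 256, 3))
image_slice = image[0, -3:, -3:, -1]  # bottom-right corner, last channel
expected_slice = np.zeros(9)          # reference values recorded once
assert image_slice.shape == (3, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2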
'''simple docstring''' import os def lowerCamelCase_ ( ) -> str: with open(os.path.dirname(SCREAMING_SNAKE_CASE__ ) + '''/p022_names.txt''' ) as file: UpperCAmelCase_ : Optional[int] = str(file.readlines()[0] ) UpperCAmelCase_ : Optional[int] = names.replace('''"''', '''''' ).split(''',''' ) names.sort() UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[str] = 0 for i, name in enumerate(SCREAMING_SNAKE_CASE__ ): for letter in name: name_score += ord(SCREAMING_SNAKE_CASE__ ) - 64 total_score += (i + 1) * name_score UpperCAmelCase_ : Optional[int] = 0 return total_score if __name__ == "__main__": print(solution())
644
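# A de-obfuscated sketch of the name-score computation above (Project Euler
# 22): the alphabetical value of each letter (A = 1) times the 1-based position
# of the name in the sorted list.
def name_score(name: str) -> int:
    return sum(ord(letter) - 64 for letter in name)  # ord('A') is 65

names = sorted(["MARY", "COLIN", "ANN"])
total = sum((i + 1) * name_score(name) for i, name in enumerate(names))
assert name_score("COLIN") == 53  # the worked example from the problem statement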
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class __a (lowerCamelCase ): __a : List[Any] = "openai/whisper-base" __a : Optional[Any] = ( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) __a : Any = "transcriber" __a : str = WhisperProcessor __a : List[Any] = WhisperForConditionalGeneration __a : int = ["audio"] __a : Optional[Any] = ["text"] def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] ) -> Optional[int]: """simple docstring""" return self.pre_processor(__magic_name__ , return_tensors='''pt''' ).input_features def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict ) -> Tuple: """simple docstring""" return self.model.generate(inputs=__magic_name__ ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict ) -> str: """simple docstring""" return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0]
644
1
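# The transcriber tool above wires a processor and a generative model into
# three stages: encode the raw input, run the model, decode the output. A
# self-contained sketch of that contract with hypothetical stand-in classes
# (not the transformers PipelineTool API):
class FakeProcessor:
    def encode(self, audio):
        return [sample * 2 for sample in audio]  # stand-in feature extraction

    def decode(self, token_ids):
        return " ".join(str(token) for token in token_ids)

class FakeModel:
    def generate(self, features):
        return [int(value) for value in features]  # stand-in generation

class TranscriberTool:
    def __init__(self, processor, model):
        self.processor, self.model = processor, model

    def __call__(self, audio):
        features = self.processor.encode(audio)  # encode
        token_ids = self.model.generate(features)  # forward
        return self.processor.decode(token_ids)  # decode

assert TranscriberTool(FakeProcessor(), FakeModel())([1.0, 2.5]) == "2 5"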
'''simple docstring''' from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer snake_case_ : Any = logging.get_logger(__name__) snake_case_ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} snake_case_ : Dict = { "vocab_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json" }, "merges_file": { "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt" }, } snake_case_ : Optional[Any] = {"allegro/herbert-base-cased": 5_14} snake_case_ : Union[str, Any] = {} class __a (lowerCamelCase ): __a : Optional[Any] = VOCAB_FILES_NAMES __a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP __a : Dict = PRETRAINED_INIT_CONFIGURATION __a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Optional[int] = HerbertTokenizer def __init__( self : Optional[int] , __magic_name__ : List[Any]=None , __magic_name__ : Optional[int]=None , __magic_name__ : List[str]=None , __magic_name__ : int="<s>" , __magic_name__ : str="<unk>" , __magic_name__ : int="<pad>" , __magic_name__ : Any="<mask>" , __magic_name__ : Optional[Any]="</s>" , **__magic_name__ : Any , ) -> Union[str, Any]: """simple docstring""" super().__init__( __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sep_token=__magic_name__ , **__magic_name__ , ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" UpperCAmelCase_ : List[Any] = [self.cls_token_id] UpperCAmelCase_ : List[Any] = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) if token_ids_a is None: return [1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1] def UpperCAmelCase__ ( self : str , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" UpperCAmelCase_ : Dict = [self.sep_token_id] UpperCAmelCase_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" UpperCAmelCase_ : int = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) return tuple(__magic_name__ )
644
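# A sketch of the sequence-pair assembly the tokenizer above implements: a
# single sequence becomes [CLS] A [SEP], a pair becomes [CLS] A [SEP] B [SEP],
# and token_type_ids are 0 over the first segment and 1 over the second
# (the special-token ids below are hypothetical).
CLS, SEP = 0, 2

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        input_ids = [CLS, *ids_a, SEP]
        return input_ids, [0] * len(input_ids)
    input_ids = [CLS, *ids_a, SEP, *ids_b, SEP]
    token_type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    return input_ids, token_type_ids

ids, types = build_inputs([5, 6], [7])
assert ids == [0, 5, 6, 2, 7, 2] and types == [0, 0, 0, 0, 1, 1]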
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int: return abs(SCREAMING_SNAKE_CASE__ ) if a == 0 else greatest_common_divisor(b % a, SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> int: while y: # --> when y=0 then loop will terminate and return x as final GCD. UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = y, x % y return abs(SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> Optional[int]: try: UpperCAmelCase_ : Optional[Any] = input('''Enter two integers separated by comma (,): ''' ).split(''',''' ) UpperCAmelCase_ : Optional[int] = int(nums[0] ) UpperCAmelCase_ : List[Any] = int(nums[1] ) print( F"""greatest_common_divisor({num_a}, {num_a}) = """ F"""{greatest_common_divisor(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" ) print(F"""By iterative gcd({num_a}, {num_a}) = {gcd_by_iterative(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )}""" ) except (IndexError, UnboundLocalError, ValueError): print('''Wrong input''' ) if __name__ == "__main__": main()
644
1
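# A clean sketch of both Euclidean variants above, with conventional names in
# place of the corpus' obfuscated ones:
def gcd_recursive(a: int, b: int) -> int:
    return abs(b) if a == 0 else gcd_recursive(b % a, a)

def gcd_iterative(a: int, b: int) -> int:
    while b:  # when b reaches 0, a holds the gcd
        a, b = b, a % b
    return abs(a)

assert gcd_recursive(54, 24) == gcd_iterative(54, 24) == 6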
'''simple docstring''' from __future__ import annotations def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int], SCREAMING_SNAKE_CASE__ : int ) -> list[list[int]]: UpperCAmelCase_ : list[list[int]] = [] UpperCAmelCase_ : list[int] = [] UpperCAmelCase_ : Any = 0 UpperCAmelCase_ : Tuple = sum(SCREAMING_SNAKE_CASE__ ) create_state_space_tree(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) return result def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : list[int], SCREAMING_SNAKE_CASE__ : list[list[int]], SCREAMING_SNAKE_CASE__ : int, ) -> None: if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum: return if sum(SCREAMING_SNAKE_CASE__ ) == max_sum: result.append(SCREAMING_SNAKE_CASE__ ) return for index in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ): create_state_space_tree( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, index + 1, [*path, nums[index]], SCREAMING_SNAKE_CASE__, remaining_nums_sum - nums[index], ) snake_case_ : Optional[int] = [3, 34, 4, 12, 5, 2] snake_case_ : int = 9 snake_case_ : List[Any] = generate_sum_of_subsets_soln(nums, max_sum) print(*result)
644
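# A readable sketch of the backtracking above: grow a path while its running
# sum can still reach the target, record the path when it does (nums sorted so
# the search can cut off early).
def subsets_with_sum(nums, target):
    nums = sorted(nums)
    result = []

    def backtrack(start, path, path_sum):
        if path_sum == target:
            result.append(path[:])
            return
        for i in range(start, len(nums)):
            if path_sum + nums[i] > target:
                break  # nums is sorted, so no later pick can fit either
            backtrack(i + 1, [*path, nums[i]], path_sum + nums[i])

    backtrack(0, [], 0)
    return result

assert subsets_with_sum([3, 34, 4, 12, 5, 2], 9) == [[2, 3, 4], [4, 5]]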
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str: """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : List[str] = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : List[str] = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : int = type_vocab_size UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : List[str] = scope UpperCAmelCase_ : List[str] = range_bbox def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase_ : List[str] = bbox[i, j, 3] UpperCAmelCase_ : Dict = bbox[i, j, 1] UpperCAmelCase_ : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase_ : List[str] = bbox[i, j, 2] UpperCAmelCase_ : Tuple = bbox[i, j, 0] UpperCAmelCase_ : Union[str, Any] = t UpperCAmelCase_ : int = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) 
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Tuple = { '''input_ids''': input_ids, 
'''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Tuple = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __a : Any = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __a : Union[str, Any] = False __a : int = False def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str: """simple docstring""" return True def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = LiltModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : Tuple = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_torch @slow class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ ) UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ ) UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ ) UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] ) UpperCAmelCase_ : List[str] = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , ) self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
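# Note on the bbox inputs exercised above (an illustrative sketch, not part of the test
# file): LiLT, like LayoutLM, expects each token's box on a 0-1000 normalized scale,
# which is why the tester draws boxes with range_bbox=1000 and re-orders corners so
# that x0 <= x1 and y0 <= y1. A small helper for normalizing pixel boxes:
def normalize_box(box, width, height):
    x0, y0, x1, y1 = box
    return [
        int(1000 * x0 / width),
        int(1000 * y0 / height),
        int(1000 * x1 / width),
        int(1000 * y1 / height),
    ]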
644
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class __a : def __init__( self : Optional[Any] , __magic_name__ : str , __magic_name__ : Any=13 , __magic_name__ : List[str]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[Any]=True , __magic_name__ : str=True , __magic_name__ : Dict=99 , __magic_name__ : Any=32 , __magic_name__ : Tuple=2 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=37 , __magic_name__ : List[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : List[str]=5_12 , __magic_name__ : Optional[Any]=16 , __magic_name__ : Dict=2 , __magic_name__ : int=0.0_2 , __magic_name__ : List[str]=3 , __magic_name__ : List[Any]=4 , __magic_name__ : str=None , ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = parent UpperCAmelCase_ : Optional[Any] = 13 UpperCAmelCase_ : List[str] = 7 UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : int = True UpperCAmelCase_ : Dict = 99 UpperCAmelCase_ : Dict = 32 UpperCAmelCase_ : Dict = 2 UpperCAmelCase_ : Optional[Any] = 4 UpperCAmelCase_ : int = 37 UpperCAmelCase_ : Union[str, Any] = '''gelu''' UpperCAmelCase_ : str = 0.1 UpperCAmelCase_ : Optional[int] = 0.1 UpperCAmelCase_ : Union[str, Any] = 5_12 UpperCAmelCase_ : str = 16 UpperCAmelCase_ : Dict = 2 UpperCAmelCase_ : List[Any] = 0.0_2 UpperCAmelCase_ : Optional[Any] = 3 UpperCAmelCase_ : List[Any] = 4 UpperCAmelCase_ : Any = None def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = None if self.use_input_mask: UpperCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : str = None if self.use_token_type_ids: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Optional[int] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : str = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Dict = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__magic_name__ , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = TFRoFormerModel(config=__magic_name__ ) UpperCAmelCase_ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} UpperCAmelCase_ : Optional[Any] = [input_ids, input_mask] UpperCAmelCase_ : Any = model(__magic_name__ ) UpperCAmelCase_ : str = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Dict , __magic_name__ : Dict ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Any = TFRoFormerForCausalLM(config=__magic_name__ ) UpperCAmelCase_ : Dict = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase_ : str = model(__magic_name__ )['''logits'''] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def UpperCAmelCase__ ( self : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Dict = TFRoFormerForMaskedLM(config=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase_ : List[str] = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Optional[int] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = self.num_labels UpperCAmelCase_ : Tuple = TFRoFormerForSequenceClassification(config=__magic_name__ ) UpperCAmelCase_ : Optional[int] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase_ : Any = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : int , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : str ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.num_choices UpperCAmelCase_ : List[Any] = TFRoFormerForMultipleChoice(config=__magic_name__ ) UpperCAmelCase_ : List[Any] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase_ : List[str] 
= tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase_ : Optional[int] = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) ) UpperCAmelCase_ : Optional[Any] = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } UpperCAmelCase_ : int = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : List[Any] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.num_labels UpperCAmelCase_ : Optional[int] = TFRoFormerForTokenClassification(config=__magic_name__ ) UpperCAmelCase_ : List[str] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase_ : Dict = model(__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Dict = TFRoFormerForQuestionAnswering(config=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = { '''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids, } UpperCAmelCase_ : Any = model(__magic_name__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" UpperCAmelCase_ : Dict = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = config_and_inputs UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Tuple = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) __a : str = ( { "feature-extraction": TFRoFormerModel, "fill-mask": TFRoFormerForMaskedLM, "question-answering": TFRoFormerForQuestionAnswering, "text-classification": TFRoFormerForSequenceClassification, "text-generation": TFRoFormerForCausalLM, "token-classification": TFRoFormerForTokenClassification, "zero-shot": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) __a : List[Any] = False __a : str = False def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def UpperCAmelCase__ ( self : Tuple ) -> Any: """simple docstring""" UpperCAmelCase_ : List[str] = 
TFRoFormerModelTester(self ) UpperCAmelCase_ : int = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Any ) -> Dict: """simple docstring""" UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) def UpperCAmelCase__ ( self : Any ) -> int: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ ) def UpperCAmelCase__ ( self : int ) -> int: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Optional[int] = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' ) self.assertIsNotNone(__magic_name__ ) @require_tf class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[Any] = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' ) UpperCAmelCase_ : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase_ : int = model(__magic_name__ )[0] # TODO Replace vocab size UpperCAmelCase_ : List[str] = 5_00_00 UpperCAmelCase_ : str = [1, 6, vocab_size] self.assertEqual(output.shape , __magic_name__ ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
UpperCAmelCase_ : str = tf.constant( [ [ [-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6], [-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7], [-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) @require_tf class __a (unittest.TestCase ): __a : int = 1e-4 def UpperCAmelCase__ ( self : str ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Dict = tf.constant([[4, 10]] ) UpperCAmelCase_ : int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) UpperCAmelCase_ : str = emba(input_ids.shape ) UpperCAmelCase_ : str = tf.constant( [[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] ) tf.debugging.assert_near(__magic_name__ , __magic_name__ , atol=self.tolerance ) def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = tf.constant( [ [0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0], [0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7], [0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0], ] ) UpperCAmelCase_ : Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 ) emba([2, 16, 5_12] ) UpperCAmelCase_ : Any = emba.weight[:3, :5] tf.debugging.assert_near(__magic_name__ , __magic_name__ , atol=self.tolerance ) @require_tf class __a (unittest.TestCase ): __a : List[str] = 1e-4 def UpperCAmelCase__ ( self : Optional[Any] ) -> Tuple: """simple docstring""" # 2,12,16,64 UpperCAmelCase_ : str = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 UpperCAmelCase_ : str = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00 UpperCAmelCase_ : int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) UpperCAmelCase_ : Dict = embed_positions([2, 16, 7_68] )[None, None, :, :] UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( __magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : Union[str, Any] = tf.constant( [ [0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0], [-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3], [-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5], [-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1], [0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0], [3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3], ] ) UpperCAmelCase_ : Any = tf.constant( [ [0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0], [0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3], [1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5], [2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1], [-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0], [-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , __magic_name__ , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , __magic_name__ , 
atol=self.tolerance )
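# The rotary trick tested above, in a standalone numpy sketch (assuming the
# interleaved sin/cos pairing convention; this is the general idea, not a
# byte-for-byte copy of the TF implementation):
import numpy as np


def rotate_every_two(x):
    # [x1, x2, x3, x4, ...] -> [-x2, x1, -x4, x3, ...]
    out = np.empty_like(x)
    out[..., 0::2] = -x[..., 1::2]
    out[..., 1::2] = x[..., 0::2]
    return out


def apply_rope(x, sin, cos):
    # Rotates each 2D feature pair by a position-dependent angle, so the dot product
    # of two rotated query/key vectors depends only on their relative position.
    return x * cos + rotate_every_two(x) * sin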
644
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ : str = logging.get_logger(__name__) snake_case_ : int = "▁" snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} snake_case_ : int = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } snake_case_ : Optional[Any] = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } snake_case_ : Dict = { "ernie-m-base": 5_14, "ernie-m-large": 5_14, } snake_case_ : Any = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class __a (lowerCamelCase ): __a : List[str] = ["input_ids"] __a : Union[str, Any] = VOCAB_FILES_NAMES __a : Tuple = PRETRAINED_INIT_CONFIGURATION __a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __a : Union[str, Any] = RESOURCE_FILES_NAMES def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) UpperCAmelCase_ : Optional[Any] = do_lower_case UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__magic_name__ ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ ) else: UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )} UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()} def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any: """simple docstring""" if text is None: return None UpperCAmelCase_ : str = self.tokenize(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', [] for i, ch in enumerate(__magic_name__ ): if ch in self.SP_CHAR_MAPPING: UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ ) else: UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ ) if self.is_whitespace(__magic_name__ ): continue normalized_text += ch char_mapping.extend([i] * len(__magic_name__ ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0 if self.do_lower_case: UpperCAmelCase_ : Optional[int] = text.lower() for token in split_tokens: if token[:1] == "▁": UpperCAmelCase_ : Tuple = token[1:] UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) UpperCAmelCase_ : int = end return token_mapping @property def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" return len(self.vocab ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.__dict__.copy() UpperCAmelCase_ : Optional[Any] = None return state def __setstate__( self : str , __magic_name__ : Any ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ : int = {} UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]: """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]: """simple docstring""" if self.sp_model_kwargs.get('''enable_sampling''' ) is True: UpperCAmelCase_ : Dict = True if self.sp_model_kwargs.get('''alpha''' ) is not None: UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' 
) if not enable_sampling: UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ ) else: UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : List[Any] = [] for pi, piece in enumerate(__magic_name__ ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0: new_pieces.append(__magic_name__ ) continue else: continue UpperCAmelCase_ : List[str] = 0 for i, chunk in enumerate(__magic_name__ ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__magic_name__ ) UpperCAmelCase_ : List[Any] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : List[str] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : str = i if len(__magic_name__ ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]: """simple docstring""" return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.reverse_vocab.get(__magic_name__ , self.unk_token ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id] UpperCAmelCase_ : List[Any] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ 
: Optional[List[int]] = None ) -> List[int]: """simple docstring""" # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(__magic_name__ ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3) def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple: """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str: """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__magic_name__ ) == 1: UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ ) if cat == "Zs": return True return False def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = {} with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(__magic_name__ ): UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' ) UpperCAmelCase_ : Dict = int(__magic_name__ ) return token_to_idx def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = 0 if os.path.isdir(__magic_name__ ): UpperCAmelCase_ : Any = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) UpperCAmelCase_ : Dict = token_index writer.write(token + '''\n''' ) index += 1 UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' ) with open(__magic_name__ , '''wb''' ) as fi: UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__magic_name__ ) return (vocab_file,)
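# Hedged usage sketch (the checkpoint comes from the pretrained maps declared above;
# the public class name is assumed to be ErnieMTokenizer):
from transformers import ErnieMTokenizer

tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
encoding = tokenizer("ErnieM handles many languages")
print(encoding["input_ids"])  # note that model_input_names above is just ["input_ids"]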
644
1
'''simple docstring'''


def solution(limit: int = 1000) -> int:
    # Project Euler problem 1: sum of all multiples of 3 or 5 below `limit`.
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    print(f'''{solution() = }''')
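# A constant-time alternative (an illustrative sketch, not part of the original
# sample): by inclusion-exclusion, the multiples of 3 or 5 below n sum to
# S(3) + S(5) - S(15), where S(k) = k * m * (m + 1) // 2 and m = (n - 1) // k.
def solution_closed_form(n: int = 1000) -> int:
    def s(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return s(3) + s(5) - s(15)


assert solution_closed_form(10) == solution(10) == 23  # 3 + 5 + 6 + 9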
644
'''simple docstring'''


def twos_complement(number: int) -> str:
    # Two's complement of a non-positive integer, returned as a "0b..." string.
    if number > 0:
        raise ValueError('''input must be a negative integer''')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
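# Illustrative checks (the docstring in the original sample was stripped, so these
# stand in for the doctests; values verified by hand against the bit patterns):
assert twos_complement(0) == "0b0"
assert twos_complement(-1) == "0b11"      # -1 in 2 bits
assert twos_complement(-5) == "0b1011"    # -5 in 4 bits
assert twos_complement(-17) == "0b101111" # -17 in 6 bits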
644
1
'''simple docstring'''


def euclidean_gcd(a: int, b: int) -> int:
    # Iterative Euclidean algorithm.
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    # Recursive variant of the Euclidean algorithm.
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}""")
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}""")
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}""")
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}""")
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}""")
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}""")
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}""")
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}""")
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}""")
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}""")


if __name__ == "__main__":
    main()
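# Property check (illustrative): the iterative and recursive variants agree, and the
# result divides both inputs whenever it is non-zero.
import random

for _ in range(100):
    a, b = random.randint(0, 10**6), random.randint(0, 10**6)
    g = euclidean_gcd(a, b)
    assert g == euclidean_gcd_recursive(a, b)
    if g:
        assert a % g == 0 and b % g == 0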
644
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(__magic_name__ ) # fails here def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 ) UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 ) UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : int ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
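# Hedged usage sketch (not part of the test): a DisjunctiveConstraint is fulfilled by
# generating ANY one of its token-id sequences, and is meant to be passed to
# `model.generate(..., constraints=[...])` together with beam search. The model name
# and the token ids below are illustrative only.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The weather today is", return_tensors="pt")
constraint = DisjunctiveConstraint([[50, 51, 52], [50, 51, 53]])  # neither is a subset of the other
output = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=20)
print(tokenizer.decode(output[0]))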
644
1
'''simple docstring'''

UNIVERSAL_GAS_CONSTANT = 8.314_4598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    # Kinetic-theory RMS speed: v_rms = sqrt(3 * R * T / M), with the temperature T
    # in kelvin and the molar mass M in kg/mol.
    if temperature < 0:
        raise Exception('''Temperature cannot be less than 0 K''')
    if molar_mass <= 0:
        raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''')
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 0.028  # molar mass of N2 in kg/mol (28 g/mol)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
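# Sanity check (illustrative): nitrogen at 300 K comes out near the textbook
# value of about 517 m/s.
assert round(rms_speed_of_molecule(300, 0.028)) == 517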
644
'''simple docstring'''

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset, change the target column here
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    # Sliding windows: each sample is `look_back` scaled values, each target is the
    # following `forward_days` values.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])

    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)  # forecasts for the held-out windows
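    # Illustrative follow-up (not in the original script): predictions come out in the
    # scaler's [0, 1] range. To report them in the original price scale, keep the
    # fitted scaler as an object and invert it afterwards, e.g.
    #     scaler = MinMaxScaler()
    #     actual_data = scaler.fit_transform(actual_data)
    #     ...
    #     pred_prices = scaler.inverse_transform(pred.reshape(-1, 1)).reshape(pred.shape)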
644
1
'''simple docstring''' from manim import * class __a (lowerCamelCase ): def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase_ : Any = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) UpperCAmelCase_ : Dict = [mem.copy() for i in range(6 )] UpperCAmelCase_ : int = [mem.copy() for i in range(6 )] UpperCAmelCase_ : int = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) UpperCAmelCase_ : Any = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) UpperCAmelCase_ : Dict = VGroup(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , buff=0 ) UpperCAmelCase_ : Tuple = Text('''CPU''' , font_size=24 ) UpperCAmelCase_ : int = Group(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , buff=0.5 , aligned_edge=__magic_name__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(4 )] UpperCAmelCase_ : int = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) UpperCAmelCase_ : str = Text('''GPU''' , font_size=24 ) UpperCAmelCase_ : Union[str, Any] = Group(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , buff=0.5 , aligned_edge=__magic_name__ ) gpu.move_to([-1, -1, 0] ) self.add(__magic_name__ ) UpperCAmelCase_ : Any = [mem.copy() for i in range(6 )] UpperCAmelCase_ : Dict = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) UpperCAmelCase_ : Union[str, Any] = Text('''Model''' , font_size=24 ) UpperCAmelCase_ : Union[str, Any] = Group(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , buff=0.5 , aligned_edge=__magic_name__ ) model.move_to([3, -1.0, 0] ) self.add(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = [] for i, rect in enumerate(__magic_name__ ): rect.set_stroke(__magic_name__ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) UpperCAmelCase_ : str = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__magic_name__ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__magic_name__ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=__magic_name__ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=__magic_name__ , buff=0.0 ) self.add(__magic_name__ ) cpu_targs.append(__magic_name__ ) UpperCAmelCase_ : Dict = [mem.copy() for i in range(6 )] UpperCAmelCase_ : List[Any] = VGroup(*__magic_name__ ).arrange(__magic_name__ , buff=0 ) UpperCAmelCase_ : List[Any] = Text('''Loaded Checkpoint''' , font_size=24 ) UpperCAmelCase_ : Dict = Group(__magic_name__ , __magic_name__ ).arrange(__magic_name__ , aligned_edge=__magic_name__ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) UpperCAmelCase_ : List[str] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase_ : List[Any] = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : str = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__magic_name__ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) UpperCAmelCase_ : Any = MarkupText( F"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , ) 
step_a.move_to([2, 2, 0] ) self.play(Write(__magic_name__ ) , Write(__magic_name__ ) ) self.play(Write(__magic_name__ , run_time=1 ) , Create(__magic_name__ , run_time=1 ) ) UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : List[str] = [] for i, rect in enumerate(__magic_name__ ): UpperCAmelCase_ : str = fill.copy().set_fill(__magic_name__ , opacity=0.7 ) target.move_to(__magic_name__ ) first_animations.append(GrowFromCenter(__magic_name__ , run_time=1 ) ) UpperCAmelCase_ : List[Any] = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(__magic_name__ , run_time=1.5 ) ) self.play(*__magic_name__ ) self.play(*__magic_name__ ) self.wait()
644
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1" snake_case_ : Dict = "CompVis/stable-diffusion-v1-2" snake_case_ : Any = "CompVis/stable-diffusion-v1-3" snake_case_ : str = "CompVis/stable-diffusion-v1-4" class __a (lowerCamelCase ): def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str: """simple docstring""" super()._init_() UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Tuple = StableDiffusionPipeline( vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )} def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" self.enable_attention_slicing(__magic_name__ ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , 
callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[Any] , 
__magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(__magic_name__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 UpperCAmelCase_ : int = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
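# Hedged usage sketch: this file follows the "stable_diffusion_comparison" community
# pipeline pattern in diffusers, fanning one prompt out to the four v1.x checkpoints.
# The custom-pipeline name and the callable entry point are assumptions, not confirmed
# by the snippet above.
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",
)
result = pipe(prompt="an astronaut riding a horse")  # one image per checkpoint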
644
1
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
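# The mechanism in miniature (an illustrative sketch, not transformers' actual
# _LazyModule): the module records which attribute lives in which submodule and only
# imports that submodule when the attribute is first touched, so top-level imports
# stay cheap.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails, i.e. on first access.
        try:
            submodule = self._attr_to_submodule[attr]
        except KeyError:
            raise AttributeError(attr) from None
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        return getattr(module, attr)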
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler snake_case_ : Optional[int] = 16 snake_case_ : Tuple = 32 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict: UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_ : Tuple = datasets.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE__ : str ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' ) # Instantiate dataloaders. UpperCAmelCase_ : str = DataLoader( tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = DataLoader( tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any: model.eval() UpperCAmelCase_ : List[str] = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(SCREAMING_SNAKE_CASE__ ) - 1: UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : List[str] = metric.compute() return eval_metric["accuracy"] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple: # Initialize accelerator UpperCAmelCase_ : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ : int = config['''lr'''] UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] ) UpperCAmelCase_ : Optional[int] = int(config['''seed'''] ) UpperCAmelCase_ : List[str] = int(config['''batch_size'''] ) UpperCAmelCase_ : Optional[int] = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ ) # Instantiate optimizer UpperCAmelCase_ : str = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, ) else: UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_ : Union[str, Any] = 0 # We also need to keep track of the starting epoch so files are named properly UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' ) UpperCAmelCase_ : Optional[Any] = num_epochs if args.partial_train_epoch is not None: UpperCAmelCase_ : List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1] UpperCAmelCase_ : int = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1 UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizer\'s lr:''', optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f: UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCAmelCase_ : int = {} for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = outputs.loss UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCAmelCase_ : Tuple = F"""epoch_{epoch}""" UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ ) accelerator.save_state(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = accuracy UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0] UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr'''] UpperCAmelCase_ : Tuple = epoch UpperCAmelCase_ : Dict = overall_step accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> List[str]: UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking 
peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, ) parser.add_argument( '''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', ) parser.add_argument( '''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', ) parser.add_argument( '''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', ) parser.add_argument( '''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', ) UpperCAmelCase_ : Optional[int] = parser.parse_args() UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
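# A hedged launch sketch for the script above; the config file name and flag
# values are illustrative assumptions. Any `accelerate config` output works,
# and a DeepSpeed section in it activates the DummyOptim/DummyScheduler paths:
#
#   accelerate launch --config_file ds_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 2 --output_dir ckpts
#
# Resuming then points --resume_from_checkpoint at one of the `epoch_<n>`
# folders written by accelerator.save_state() during training.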
'''simple docstring''' import os def lowerCamelCase_ ( ) -> str: with open(os.path.dirname(SCREAMING_SNAKE_CASE__ ) + '''/grid.txt''' ) as f: UpperCAmelCase_ : Tuple = [] # noqa: E741 for _ in range(20 ): l.append([int(SCREAMING_SNAKE_CASE__ ) for x in f.readline().split()] ) UpperCAmelCase_ : Optional[Any] = 0 # right for i in range(20 ): for j in range(17 ): UpperCAmelCase_ : str = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: UpperCAmelCase_ : Optional[Any] = temp # down for i in range(17 ): for j in range(20 ): UpperCAmelCase_ : Optional[int] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: UpperCAmelCase_ : Dict = temp # diagonal 1 for i in range(17 ): for j in range(17 ): UpperCAmelCase_ : int = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: UpperCAmelCase_ : Tuple = temp # diagonal 2 for i in range(17 ): for j in range(3, 20 ): UpperCAmelCase_ : Tuple = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: UpperCAmelCase_ : Optional[int] = temp return maximum if __name__ == "__main__": print(solution())
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int] ) -> list[list[int]]: UpperCAmelCase_ : int = [] if len(SCREAMING_SNAKE_CASE__ ) == 1: return [nums.copy()] for _ in range(len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase_ : List[Any] = nums.pop(0 ) UpperCAmelCase_ : Optional[Any] = permute(SCREAMING_SNAKE_CASE__ ) for perm in permutations: perm.append(SCREAMING_SNAKE_CASE__ ) result.extend(SCREAMING_SNAKE_CASE__ ) nums.append(SCREAMING_SNAKE_CASE__ ) return result def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> Any: def backtrack(SCREAMING_SNAKE_CASE__ : Union[str, Any] ): if start == len(SCREAMING_SNAKE_CASE__ ) - 1: output.append(nums[:] ) else: for i in range(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ): UpperCAmelCase_ , UpperCAmelCase_ : Tuple = nums[i], nums[start] backtrack(start + 1 ) UpperCAmelCase_ , UpperCAmelCase_ : int = nums[i], nums[start] # backtrack UpperCAmelCase_ : Optional[int] = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function snake_case_ : Tuple = permutea([1, 2, 3]) print(res) doctest.testmod()
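# A sanity-check sketch for the two permutation routines above. Their real
# names are masked (both defs render as `lowerCamelCase_`), so the direct
# call is left commented out as an assumption; the itertools reference
# result itself is exact.
import itertools

expected = sorted(itertools.permutations([1, 2, 3]))
print(expected)  # 6 tuples, (1, 2, 3) through (3, 2, 1)
# assert sorted(tuple(p) for p in permute([1, 2, 3])) == expected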
'''simple docstring''' from collections.abc import Iterable from typing import Any class __a : def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = value UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def __repr__( self : List[str] ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class __a : def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = root def __str__( self : Any ) -> str: """simple docstring""" return str(self.root ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None: """simple docstring""" if new_children is not None: # reset its kids UpperCAmelCase_ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(__magic_name__ ): # If it is the right children UpperCAmelCase_ : Optional[Any] = new_children else: UpperCAmelCase_ : Optional[int] = new_children else: UpperCAmelCase_ : List[str] = new_children def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.root is None def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None: """simple docstring""" UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase_ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase_ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase_ : List[Any] = parent_node.left else: if parent_node.right is None: UpperCAmelCase_ : List[Any] = new_node break else: UpperCAmelCase_ : Union[str, Any] = parent_node.right UpperCAmelCase_ : Union[str, Any] = parent_node def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None: """simple docstring""" for value in values: self.__insert(__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None: """simple docstring""" if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase_ : str = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right return node def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: if self.root is None: return None UpperCAmelCase_ : Dict = self.root if not self.empty(): while node.right is not None: UpperCAmelCase_ : Any = node.right return node def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: UpperCAmelCase_ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase_ : Union[str, Any] = self.root while node.left is not None: UpperCAmelCase_ : Dict = node.left return node def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__magic_name__ , __magic_name__ ) elif node.left is None: # Has only right children self.__reassign_nodes(__magic_name__ , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__magic_name__ , node.left ) else: UpperCAmelCase_ : List[str] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase_ : Optional[int] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None: """simple docstring""" if node: self.inorder(__magic_name__ , node.left ) arr.append(node.value ) self.inorder(__magic_name__ , node.right ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int: """simple docstring""" UpperCAmelCase_ : list[int] = [] self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal return arr[k - 1] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]: UpperCAmelCase_ : Any = [] if curr_node is not None: UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def lowerCamelCase_ ( ) -> None: UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase_ : Tuple = BinarySearchTree() for i in testlist: t.insert(SCREAMING_SNAKE_CASE__ ) # Prints all the elements of the list in order traversal print(SCREAMING_SNAKE_CASE__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''', t.get_max().value ) # type: ignore 
print('''Min Value: ''', t.get_min().value ) # type: ignore for i in testlist: t.remove(SCREAMING_SNAKE_CASE__ ) print(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
'''simple docstring''' class __a : def __init__( self : List[Any] , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : Optional[Any] = size UpperCAmelCase_ : Tuple = [0] * size UpperCAmelCase_ : Optional[Any] = [0] * size @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return index | (index + 1) @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return (index & (index + 1)) - 1 def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : int = value while index < self.size: UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1 if current_left_border == index: UpperCAmelCase_ : List[str] = value else: UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ ) def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int: """simple docstring""" right -= 1 # Because `right` is exclusive UpperCAmelCase_ : List[str] = 0 while left <= right: UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ ) if left <= current_left: UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] ) UpperCAmelCase_ : Optional[Any] = current_left else: UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
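# A hedged usage sketch for the masked max-tree above. Both public methods
# share the same masked name in this dump, so the `update`/`query` names
# below are assumptions about which is which: `update(i, v)` writes value v
# at index i, and `query(l, r)` returns the maximum over the half-open
# range [l, r), as the `right -= 1` comment in the query body indicates.
#
#   tree = MaxTree(5)        # the class itself is masked as `__a` above
#   tree.update(2, 7)
#   tree.update(4, 3)
#   assert tree.query(0, 5) == 7
#   assert tree.query(3, 5) == 3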
'''simple docstring''' import math def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> bool: UpperCAmelCase_ : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : float = 1 / 12345 ) -> int: UpperCAmelCase_ : str = 0 UpperCAmelCase_ : int = 0 UpperCAmelCase_ : int = 3 while True: UpperCAmelCase_ : str = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Tuple = int(SCREAMING_SNAKE_CASE__ ) total_partitions += 1 if check_partition_perfect(SCREAMING_SNAKE_CASE__ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(SCREAMING_SNAKE_CASE__ ) integer += 1 if __name__ == "__main__": print(f'''{solution() = }''')
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str: """simple docstring""" UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Any = use_input_mask UpperCAmelCase_ : List[str] = use_token_type_ids UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : Tuple = scope def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : str = None if self.use_token_type_ids: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: 
"""simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # create attention mask UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) UpperCAmelCase_ : Any = self.seq_length // 2 UpperCAmelCase_ : Tuple = 0 # first forward pass UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1 UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCAmelCase_ : str = random_other_next_tokens # append to next input_ids and attn_mask UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : int = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , ) # get two different outputs UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] # select random slice UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , 
output_from_past.shape[-1] ).item() UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach() UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval() UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) # first forward pass UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[ '''last_hidden_state''' ] # select random slice UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ ) model.to(__magic_name__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str: """simple docstring""" UpperCAmelCase_ : int = BioGptModel(__magic_name__ ) UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 ) def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]: 
"""simple docstring""" UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = config_and_inputs UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : str = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else () __a : Union[str, Any] = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) __a : List[str] = False def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = BioGptModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : str = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ ) @slow def UpperCAmelCase__ ( 
self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : Tuple = '''left''' # Define PAD Token = EOS Token = 50256 UpperCAmelCase_ : List[Any] = tokenizer.eos_token UpperCAmelCase_ : List[Any] = model.config.eos_token_id # use different length sentences to test batching UpperCAmelCase_ : Tuple = [ '''Hello, my dog is a little''', '''Today, I''', ] UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ ) UpperCAmelCase_ : Any = model.generate( input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , ) UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ ) UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item() UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings ) UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = [ '''Hello, my dog is a little bit bigger than a little bit.''', '''Today, I have a good idea of how to use the information''', ] self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : Tuple = input_dict['''input_ids'''] UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = 3 UpperCAmelCase_ : Optional[int] = '''multi_label_classification''' UpperCAmelCase_ : int = input_dict['''input_ids'''] UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCAmelCase_ : str = model(__magic_name__ )[0] UpperCAmelCase_ : Optional[int] = 4_23_84 UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __magic_name__ ) UpperCAmelCase_ : List[Any] = torch.tensor( [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ ) UpperCAmelCase_ : Optional[int] = model.generate( **__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , ) UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ( '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the''' ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and''' ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),''' ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and''' ''' more than 800,000 deaths.''' ) self.assertEqual(__magic_name__ , __magic_name__ )
'''simple docstring''' import logging import os import threading import time try: import warnings except ImportError: snake_case_ : Union[str, Any] = None try: import msvcrt except ImportError: snake_case_ : Optional[Any] = None try: import fcntl except ImportError: snake_case_ : int = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: snake_case_ : int = OSError # Data # ------------------------------------------------ snake_case_ : Optional[int] = [ "Timeout", "BaseFileLock", "WindowsFileLock", "UnixFileLock", "SoftFileLock", "FileLock", ] snake_case_ : int = "3.0.12" snake_case_ : Optional[Any] = None def lowerCamelCase_ ( ) -> str: global _logger UpperCAmelCase_ : Any = _logger or logging.getLogger(__name__ ) return _logger class __a (lowerCamelCase ): def __init__( self : int , __magic_name__ : List[str] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = lock_file return None def __str__( self : List[str] ) -> Any: """simple docstring""" UpperCAmelCase_ : Tuple = F"""The file lock '{self.lock_file}' could not be acquired.""" return temp class __a : def __init__( self : Optional[int] , __magic_name__ : Optional[int] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Tuple = lock return None def __enter__( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return self.lock def __exit__( self : str , __magic_name__ : Dict , __magic_name__ : Tuple , __magic_name__ : Tuple ) -> Dict: """simple docstring""" self.lock.release() return None class __a : def __init__( self : Union[str, Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str=-1 , __magic_name__ : Dict=None ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = max_filename_length if max_filename_length is not None else 2_55 # Hash the filename if it's too long UpperCAmelCase_ : List[Any] = self.hash_filename_if_too_long(__magic_name__ , __magic_name__ ) # The path to the lock file. UpperCAmelCase_ : str = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. UpperCAmelCase_ : int = None # The default timeout value. UpperCAmelCase_ : Union[str, Any] = timeout # We use this lock primarily for the lock counter. UpperCAmelCase_ : Tuple = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. 
UpperCAmelCase_ : int = 0 return None @property def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : str ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Dict = float(__magic_name__ ) return None def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Optional[Any]=None , __magic_name__ : str=0.0_5 ) -> Optional[int]: """simple docstring""" # Use the default timeout, if no timeout is provided. if timeout is None: UpperCAmelCase_ : Optional[Any] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. with self._thread_lock: self._lock_counter += 1 UpperCAmelCase_ : Optional[Any] = id(self ) UpperCAmelCase_ : List[str] = self._lock_file UpperCAmelCase_ : List[str] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F"""Attempting to acquire lock {lock_id} on {lock_filename}""" ) self._acquire() if self.is_locked: logger().debug(F"""Lock {lock_id} acquired on {lock_filename}""" ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F"""Timeout on acquiring lock {lock_id} on {lock_filename}""" ) raise Timeout(self._lock_file ) else: logger().debug( F"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" ) time.sleep(__magic_name__ ) except: # noqa # Something did go wrong, so decrement the counter. 
with self._thread_lock: UpperCAmelCase_ : Optional[int] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Optional[Any]=False ) -> Tuple: """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: UpperCAmelCase_ : Optional[Any] = id(self ) UpperCAmelCase_ : Any = self._lock_file logger().debug(F"""Attempting to release lock {lock_id} on {lock_filename}""" ) self._release() UpperCAmelCase_ : Optional[Any] = 0 logger().debug(F"""Lock {lock_id} released on {lock_filename}""" ) return None def __enter__( self : Any ) -> int: """simple docstring""" self.acquire() return self def __exit__( self : Tuple , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Dict ) -> Dict: """simple docstring""" self.release() return None def __del__( self : int ) -> Optional[Any]: """simple docstring""" self.release(force=__magic_name__ ) return None def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ) -> str: """simple docstring""" UpperCAmelCase_ : Any = os.path.basename(__magic_name__ ) if len(__magic_name__ ) > max_length and max_length > 0: UpperCAmelCase_ : Dict = os.path.dirname(__magic_name__ ) UpperCAmelCase_ : int = str(hash(__magic_name__ ) ) UpperCAmelCase_ : Any = filename[: max_length - len(__magic_name__ ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(__magic_name__ , __magic_name__ ) else: return path class __a (lowerCamelCase ): def __init__( self : Optional[int] , __magic_name__ : int , __magic_name__ : Dict=-1 , __magic_name__ : Tuple=None ) -> Optional[int]: """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(__magic_name__ , timeout=__magic_name__ , max_filename_length=__magic_name__ ) UpperCAmelCase_ : List[str] = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: UpperCAmelCase_ : Union[str, Any] = os.open(self._lock_file , __magic_name__ ) except OSError: pass else: try: msvcrt.locking(__magic_name__ , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(__magic_name__ ) else: UpperCAmelCase_ : Any = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" UpperCAmelCase_ : Optional[int] = self._lock_file_fd UpperCAmelCase_ : Union[str, Any] = None msvcrt.locking(__magic_name__ , msvcrt.LK_UNLCK , 1 ) os.close(__magic_name__ ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __a (lowerCamelCase ): def __init__( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[str]=-1 , __magic_name__ : List[str]=None ) -> Any: """simple docstring""" UpperCAmelCase_ : int = os.statvfs(os.path.dirname(__magic_name__ ) ).f_namemax super().__init__(__magic_name__ , timeout=__magic_name__ , max_filename_length=__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : str = os.O_RDWR | os.O_CREAT | os.O_TRUNC UpperCAmelCase_ : Union[str, Any] = os.open(self._lock_file , __magic_name__ ) try: fcntl.flock(__magic_name__ , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(__magic_name__ ) else: UpperCAmelCase_ : str = fd return None def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" # Do not remove the lockfile: # # https://github.com/benediktschmitt/py-filelock/issues/31 # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition UpperCAmelCase_ : Optional[int] = self._lock_file_fd UpperCAmelCase_ : List[Any] = None fcntl.flock(__magic_name__ , fcntl.LOCK_UN ) os.close(__magic_name__ ) return None class __a (lowerCamelCase ): def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[str] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: UpperCAmelCase_ : Tuple = os.open(self._lock_file , __magic_name__ ) except OSError: pass else: UpperCAmelCase_ : Optional[Any] = fd return None def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" os.close(self._lock_file_fd ) UpperCAmelCase_ : Tuple = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None snake_case_ : List[str] = None if msvcrt: snake_case_ : int = WindowsFileLock elif fcntl: snake_case_ : Any = UnixFileLock else: snake_case_ : Dict = SoftFileLock if warnings is not None: warnings.warn("only soft file lock is available")
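# A minimal usage sketch for the lock classes above. This module is a
# vendored copy of the `filelock` package (note the "3.0.12" version
# string), so the same pattern with the standalone pip package reads:
from filelock import FileLock, Timeout

lock = FileLock("resource.txt.lock", timeout=5)
try:
    with lock:  # acquire() on enter, release() on exit
        with open("resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("another process is holding resource.txt.lock")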
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __a (lowerCamelCase , unittest.TestCase ): __a : List[str] = BlenderbotSmallTokenizer __a : List[Any] = False def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" super().setUp() UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = '''adapt act apte''' UpperCAmelCase_ : Tuple = '''adapt act apte''' return input_text, output_text def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ : List[Any] = '''adapt act apte''' UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te'''] UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] UpperCAmelCase_ : Optional[int] = '''I am a small frog.''' UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) UpperCAmelCase_ : List[Any] = '''I am a small frog .''' UpperCAmelCase_ : Any = '''.''' UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ : Dict = logging.get_logger(__name__) snake_case_ : Optional[int] = {"vocab_file": "spiece.model"} snake_case_ : List[str] = { "vocab_file": { "bert_for_seq_generation": ( "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model" ), } } snake_case_ : Union[str, Any] = {"bert_for_seq_generation": 5_12} class __a (lowerCamelCase ): __a : Any = VOCAB_FILES_NAMES __a : Dict = PRETRAINED_VOCAB_FILES_MAP __a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : List[int] = [] __a : Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Dict , __magic_name__ : int , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[str]="<pad>" , __magic_name__ : Optional[int]="<::::>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : int , ) -> None: """simple docstring""" UpperCAmelCase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , sep_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) UpperCAmelCase_ : int = vocab_file UpperCAmelCase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__magic_name__ ) @property def UpperCAmelCase__ ( self : List[str] ) -> List[str]: """simple docstring""" return self.sp_model.get_piece_size() def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : List[str] = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Optional[int] ) -> Tuple: """simple docstring""" UpperCAmelCase_ : str = self.__dict__.copy() UpperCAmelCase_ : List[str] = None return state def __setstate__( self : Any , __magic_name__ : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : str = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ : List[str] = {} UpperCAmelCase_ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def UpperCAmelCase__ ( self : str , __magic_name__ : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ ) def UpperCAmelCase__ ( self : Any , __magic_name__ : int ) -> List[str]: """simple docstring""" return self.sp_model.piece_to_id(__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = self.sp_model.IdToPiece(__magic_name__ ) return token def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Optional[int] ) -> int: """simple docstring""" UpperCAmelCase_ : int = [] UpperCAmelCase_ : Any = '''''' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(__magic_name__ ) + token UpperCAmelCase_ : Optional[int] = [] else: current_sub_tokens.append(__magic_name__ ) out_string += self.sp_model.decode(__magic_name__ ) return 
out_string.strip() def UpperCAmelCase__ ( self : int , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__magic_name__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase_ : Any = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __magic_name__ ) elif not os.path.isfile(self.vocab_file ): with open(__magic_name__ , '''wb''' ) as fi: UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__magic_name__ ) return (out_vocab_file,)
644
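The tokenizer above is a thin wrapper over a raw SentencePiece processor. A minimal sketch of the same encode/convert/decode round trip, assuming a local "spiece.model" file (a placeholder path; any trained SentencePiece model works):

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # hypothetical local model file

pieces = sp.encode("Hello world", out_type=str)  # e.g. ['▁Hello', '▁world']
ids = [sp.piece_to_id(p) for p in pieces]        # the token-to-id step used above
print(sp.decode(pieces))                         # back to "Hello world"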
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = get_activation('''swish''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[int] = get_activation('''mish''' ) self.assertIsInstance(__magic_name__ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = get_activation('''gelu''' ) self.assertIsInstance(__magic_name__ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
644
1
'''simple docstring'''
from argparse import ArgumentParser

from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info


def parse_unknown_args(unknown_args):
    """Pair consecutive leftover CLI tokens into a flag/value dict, stripping leading dashes."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
644
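The parse_unknown_args helper above pairs leftover CLI tokens positionally: even indices become flag names (dashes stripped), odd indices their values. A standalone check with made-up flags:

unknown_args = ["--num_proc", "4", "--cache_dir", "/tmp/hf"]
parsed = {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}
print(parsed)  # {'num_proc': '4', 'cache_dir': '/tmp/hf'}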
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging snake_case_ : Union[str, Any] = logging.get_logger(__name__) class __a (lowerCamelCase ): __a : Tuple = ["pixel_values"] def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None: """simple docstring""" UpperCAmelCase_ : int = do_resize UpperCAmelCase_ : Tuple = do_rescale UpperCAmelCase_ : List[Any] = size_divisor UpperCAmelCase_ : Any = resample super().__init__(**__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ ) # Rounds the height and width down to the closest multiple of size_divisor UpperCAmelCase_ : Dict = height // size_divisor * size_divisor UpperCAmelCase_ : Dict = width // size_divisor * size_divisor UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) return image def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray: """simple docstring""" return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature: """simple docstring""" UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor UpperCAmelCase_ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ ) if not valid_images(__magic_name__ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images] if do_resize: UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images] if do_rescale: UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images] UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images] UpperCAmelCase_ : int = {'''pixel_values''': images} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
644
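The resize step above never interpolates to an arbitrary shape; it snaps height and width down to the nearest multiple of size_divisor. A quick sketch of that rounding:

def snap_down(height: int, width: int, size_divisor: int = 32) -> tuple[int, int]:
    # floor-divide then multiply back: 642 // 32 * 32 == 640
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

print(snap_down(480, 642))  # (480, 640)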
1
'''simple docstring''' import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class __a : def __init__( self : str , __magic_name__ : List[str] , __magic_name__ : Optional[int]=2 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=16 , __magic_name__ : int=3 , __magic_name__ : Dict=True , __magic_name__ : List[str]=True , __magic_name__ : Tuple=32 , __magic_name__ : str=4 , __magic_name__ : List[Any]=[0, 1, 2, 3] , __magic_name__ : Any=4 , __magic_name__ : Optional[Any]=37 , __magic_name__ : List[str]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Optional[int]=0.0_2 , __magic_name__ : str=3 , __magic_name__ : List[str]=[1, 3_84, 24, 24] , __magic_name__ : List[Any]=True , __magic_name__ : int=None , ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : Union[str, Any] = image_size UpperCAmelCase_ : Optional[int] = patch_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Tuple = use_labels UpperCAmelCase_ : List[str] = hidden_size UpperCAmelCase_ : Optional[Any] = num_hidden_layers UpperCAmelCase_ : str = backbone_out_indices UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase_ : Tuple = attention_probs_dropout_prob UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : List[str] = num_labels UpperCAmelCase_ : int = backbone_featmap_shape UpperCAmelCase_ : Optional[Any] = scope UpperCAmelCase_ : str = is_hybrid # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : Union[str, Any] = (image_size // patch_size) ** 2 UpperCAmelCase_ : Optional[Any] = num_patches + 1 def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : int = None if self.use_labels: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Optional[Any] = { '''global_padding''': '''same''', '''layer_type''': '''bottleneck''', '''depths''': [3, 4, 9], '''out_features''': ['''stage1''', '''stage2''', '''stage3'''], '''embedding_dynamic_padding''': True, '''hidden_sizes''': [96, 1_92, 3_84, 7_68], '''num_groups''': 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , 
num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__magic_name__ , backbone_featmap_shape=self.backbone_featmap_shape , ) def UpperCAmelCase__ ( self : int , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = DPTModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Tuple = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.num_labels UpperCAmelCase_ : Optional[int] = DPTForDepthEstimation(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : int , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : int ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = self.num_labels UpperCAmelCase_ : Optional[Any] = DPTForSemanticSegmentation(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Tuple = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = config_and_inputs UpperCAmelCase_ : Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : int = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () __a : Tuple = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) __a : List[str] = False __a : List[str] = False __a : Union[str, Any] = False def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" UpperCAmelCase_ : Dict = DPTModelTester(self ) UpperCAmelCase_ : List[str] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Dict ) -> List[str]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''DPT does not use inputs_embeds''' ) def UpperCAmelCase__ ( self : Optional[int] ) -> str: """simple docstring""" pass def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : str = 
model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase_ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Tuple = model_class(__magic_name__ ) UpperCAmelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : str = [*signature.parameters.keys()] UpperCAmelCase_ : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> List[str]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__magic_name__ ) def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = True if model_class in get_values(__magic_name__ ): continue UpperCAmelCase_ : List[Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.train() UpperCAmelCase_ : Tuple = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) UpperCAmelCase_ : Dict = model(**__magic_name__ ).loss loss.backward() def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : int = True if model_class in get_values(__magic_name__ ) or not model_class.supports_gradient_checkpointing: continue UpperCAmelCase_ : Optional[Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.gradient_checkpointing_enable() model.train() UpperCAmelCase_ : int = self._prepare_for_class(__magic_name__ , __magic_name__ , return_labels=__magic_name__ ) UpperCAmelCase_ : int = model(**__magic_name__ ).loss loss.backward() def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : str = _config_zero_init(__magic_name__ ) for model_class in self.all_model_classes: UpperCAmelCase_ : int = model_class(config=__magic_name__ ) # Skip the check for the backbone UpperCAmelCase_ : Optional[int] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": UpperCAmelCase_ : Any = [F"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in 
model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" pass @slow def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: UpperCAmelCase_ : Tuple = DPTModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> List[str]: """simple docstring""" # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Optional[int] = '''add''' with self.assertRaises(__magic_name__ ): UpperCAmelCase_ : Union[str, Any] = DPTForDepthEstimation(__magic_name__ ) def lowerCamelCase_ ( ) -> Any: UpperCAmelCase_ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision @slow class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Dict = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' ) UpperCAmelCase_ : Any = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(__magic_name__ ) UpperCAmelCase_ : List[Any] = prepare_img() UpperCAmelCase_ : Tuple = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : int = model(**__magic_name__ ) UpperCAmelCase_ : Optional[int] = outputs.predicted_depth # verify the predicted depth UpperCAmelCase_ : Optional[int] = torch.Size((1, 3_84, 3_84) ) self.assertEqual(predicted_depth.shape , __magic_name__ ) UpperCAmelCase_ : List[str] = torch.tensor( [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __magic_name__ , atol=1E-4 ) )
644
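The tester's expected sequence length follows the usual ViT patch arithmetic: the image is cut into non-overlapping square patches and one [CLS] token is prepended. With the tester's defaults:

image_size, patch_size = 32, 16
num_patches = (image_size // patch_size) ** 2  # 4 patches
seq_length = num_patches + 1                   # 5, including the [CLS] token
print(num_patches, seq_length)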
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive integers that are both n digits long and an nth power (Project Euler 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(1 for power in powers for base in bases if len(str(base**power)) == power)


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
644
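The predicate inside solution counts pairs where an nth power has exactly n digits. Two concrete cases:

print(len(str(8**2)) == 2)   # True: 64 is a 2-digit square
print(len(str(10**2)) == 2)  # False: 100 has 3 digits, so base 10 never qualifies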
1
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = tempfile.mkdtemp() # fmt: off UpperCAmelCase_ : Dict = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on UpperCAmelCase_ : List[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : List[Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] UpperCAmelCase_ : Tuple = {'''unk_token''': '''<unk>'''} UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) UpperCAmelCase_ : List[Any] = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } UpperCAmelCase_ : Union[str, Any] = os.path.join(self.tmpdirname , __magic_name__ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__magic_name__ , __magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , **__magic_name__ : Union[str, Any] ) -> Optional[int]: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : int , **__magic_name__ : Union[str, Any] ) -> Optional[Any]: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : List[str] ) -> int: """simple docstring""" return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCAmelCase__ ( self : Tuple ) -> int: """simple docstring""" UpperCAmelCase_ : List[str] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] UpperCAmelCase_ : Tuple = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = self.get_image_processor() UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) 
processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = CLIPProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __magic_name__ ) self.assertIsInstance(processor_fast.tokenizer , __magic_name__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , __magic_name__ ) self.assertIsInstance(processor_fast.image_processor , __magic_name__ ) def UpperCAmelCase__ ( self : Any ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) UpperCAmelCase_ : Union[str, Any] = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 ) UpperCAmelCase_ : Any = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__magic_name__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __magic_name__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __magic_name__ ) def UpperCAmelCase__ ( self : List[str] ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.get_image_processor() UpperCAmelCase_ : Optional[int] = self.get_tokenizer() UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) UpperCAmelCase_ : List[Any] = self.prepare_image_inputs() UpperCAmelCase_ : Dict = image_processor(__magic_name__ , return_tensors='''np''' ) UpperCAmelCase_ : Dict = processor(images=__magic_name__ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[str] = self.get_image_processor() UpperCAmelCase_ : Optional[Any] = self.get_tokenizer() UpperCAmelCase_ : Any = CLIPProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) UpperCAmelCase_ : int = '''lower newer''' UpperCAmelCase_ : str = processor(text=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = tokenizer(__magic_name__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase__ ( self : int ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.get_image_processor() UpperCAmelCase_ : Dict = self.get_tokenizer() UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) UpperCAmelCase_ : List[Any] = 
'''lower newer''' UpperCAmelCase_ : List[Any] = self.prepare_image_inputs() UpperCAmelCase_ : Optional[int] = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__magic_name__ ): processor() def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Any = self.get_image_processor() UpperCAmelCase_ : List[Any] = self.get_tokenizer() UpperCAmelCase_ : Optional[Any] = CLIPProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) UpperCAmelCase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase_ : Dict = processor.batch_decode(__magic_name__ ) UpperCAmelCase_ : List[str] = tokenizer.batch_decode(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Dict = self.get_image_processor() UpperCAmelCase_ : Optional[Any] = self.get_tokenizer() UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ ) UpperCAmelCase_ : List[str] = '''lower newer''' UpperCAmelCase_ : Any = self.prepare_image_inputs() UpperCAmelCase_ : Tuple = processor(text=__magic_name__ , images=__magic_name__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
644
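The test fixture above builds its dummy inputs by generating a random channels-first uint8 array and moving the channel axis last before handing it to PIL. A standalone version of that helper:

import numpy as np
from PIL import Image

arr = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)  # (C, H, W)
img = Image.fromarray(np.moveaxis(arr, 0, -1))                   # PIL wants (H, W, C)
print(img.size)  # (400, 30) -- PIL reports (width, height)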
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

import torch

from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
644
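The tool's decode step reduces the VQA logits to a single answer id and looks it up in the config's id2label table. A toy illustration of that reduction (the mapping here is made up; the real one lives on model.config):

import torch

logits = torch.tensor([[0.1, 2.5, -0.3]])   # one row of answer scores
idx = logits.argmax(-1).item()              # 1
id2label = {0: "no", 1: "yes", 2: "maybe"}  # toy mapping
print(id2label[idx])  # "yes"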
1
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums by popping each element and recursing on the rest."""
    result: list[list[int]] = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums using in-place swaps and backtracking."""
    output: list[list[int]] = []

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in the permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
644
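Both helpers above enumerate every ordering of the input, so the result size grows as n!. A standalone cross-check against the standard library:

from itertools import permutations
from math import factorial

nums = [1, 2, 3]
assert len(list(permutations(nums))) == factorial(len(nums)) == 6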
'''simple docstring''' from collections.abc import Iterable from typing import Any class __a : def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = value UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def __repr__( self : List[str] ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class __a : def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = root def __str__( self : Any ) -> str: """simple docstring""" return str(self.root ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None: """simple docstring""" if new_children is not None: # reset its kids UpperCAmelCase_ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(__magic_name__ ): # If it is the right children UpperCAmelCase_ : Optional[Any] = new_children else: UpperCAmelCase_ : Optional[int] = new_children else: UpperCAmelCase_ : List[str] = new_children def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.root is None def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None: """simple docstring""" UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase_ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase_ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase_ : List[Any] = parent_node.left else: if parent_node.right is None: UpperCAmelCase_ : List[Any] = new_node break else: UpperCAmelCase_ : Union[str, Any] = parent_node.right UpperCAmelCase_ : Union[str, Any] = parent_node def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None: """simple docstring""" for value in values: self.__insert(__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None: """simple docstring""" if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase_ : str = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right return node def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: if self.root is None: return None UpperCAmelCase_ : Dict = self.root if not self.empty(): while node.right is not None: UpperCAmelCase_ : Any = node.right return node def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: UpperCAmelCase_ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase_ : Union[str, Any] = self.root while node.left is not None: UpperCAmelCase_ : Dict = node.left return node def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__magic_name__ , __magic_name__ ) elif node.left is None: # Has only right children self.__reassign_nodes(__magic_name__ , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__magic_name__ , node.left ) else: UpperCAmelCase_ : List[str] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase_ : Optional[int] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None: """simple docstring""" if node: self.inorder(__magic_name__ , node.left ) arr.append(node.value ) self.inorder(__magic_name__ , node.right ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int: """simple docstring""" UpperCAmelCase_ : list[int] = [] self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal return arr[k - 1] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]: UpperCAmelCase_ : Any = [] if curr_node is not None: UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def lowerCamelCase_ ( ) -> None: UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase_ : Tuple = BinarySearchTree() for i in testlist: t.insert(SCREAMING_SNAKE_CASE__ ) # Prints all the elements of the list in order traversal print(SCREAMING_SNAKE_CASE__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''', t.get_max().value ) # type: ignore 
print('''Min Value: ''', t.get_min().value ) # type: ignore for i in testlist: t.remove(SCREAMING_SNAKE_CASE__ ) print(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
644
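The kth-smallest lookup above relies on the BST invariant that an inorder walk visits values in ascending order; sorted() over the same test data gives an independent check:

testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
inorder_expected = sorted(testlist)
print(inorder_expected)         # [1, 3, 4, 6, 7, 8, 10, 13, 14]
print(inorder_expected[3 - 1])  # 4 == the 3rd-smallest value (k = 3)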
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class __a (unittest.TestCase ): __a : str = MODEL_FOR_CAUSAL_LM_MAPPING __a : Any = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def UpperCAmelCase__ ( self : List[str] ) -> int: """simple docstring""" UpperCAmelCase_ : str = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output UpperCAmelCase_ : Optional[Any] = text_generator('''This is a test''' , do_sample=__magic_name__ ) self.assertEqual( __magic_name__ , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) UpperCAmelCase_ : Any = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __magic_name__ , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) UpperCAmelCase_ : List[Any] = text_generator('''This is a test''' , do_sample=__magic_name__ , num_return_sequences=2 , return_tensors=__magic_name__ ) self.assertEqual( __magic_name__ , [ {'''generated_token_ids''': ANY(__magic_name__ )}, {'''generated_token_ids''': ANY(__magic_name__ )}, ] , ) UpperCAmelCase_ : Any = text_generator.model.config.eos_token_id UpperCAmelCase_ : Union[str, Any] = '''<pad>''' UpperCAmelCase_ : List[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__magic_name__ , num_return_sequences=2 , batch_size=2 , return_tensors=__magic_name__ , ) self.assertEqual( __magic_name__ , [ [ {'''generated_token_ids''': ANY(__magic_name__ )}, {'''generated_token_ids''': ANY(__magic_name__ )}, ], [ {'''generated_token_ids''': ANY(__magic_name__ )}, {'''generated_token_ids''': ANY(__magic_name__ )}, ], ] , ) @require_tf def UpperCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output UpperCAmelCase_ : Tuple = text_generator('''This is a test''' , do_sample=__magic_name__ ) self.assertEqual( __magic_name__ , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) UpperCAmelCase_ : List[Any] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__magic_name__ ) self.assertEqual( __magic_name__ , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : Any ) -> 
Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = TextGenerationPipeline(model=__magic_name__ , tokenizer=__magic_name__ ) return text_generator, ["This is a test", "Another test"] def UpperCAmelCase__ ( self : str ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[Any] = '''Hello I believe in''' UpperCAmelCase_ : Optional[int] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) UpperCAmelCase_ : Tuple = text_generator(__magic_name__ ) self.assertEqual( __magic_name__ , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) UpperCAmelCase_ : Optional[Any] = text_generator(__magic_name__ , stop_sequence=''' fe''' ) self.assertEqual(__magic_name__ , [{'''generated_text''': '''Hello I believe in fe'''}] ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = text_generator.model UpperCAmelCase_ : Optional[int] = text_generator.tokenizer UpperCAmelCase_ : Any = text_generator('''This is a test''' ) self.assertEqual(__magic_name__ , [{'''generated_text''': ANY(__magic_name__ )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) UpperCAmelCase_ : Optional[int] = text_generator('''This is a test''' , return_full_text=__magic_name__ ) self.assertEqual(__magic_name__ , [{'''generated_text''': ANY(__magic_name__ )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) UpperCAmelCase_ : Optional[int] = pipeline(task='''text-generation''' , model=__magic_name__ , tokenizer=__magic_name__ , return_full_text=__magic_name__ ) UpperCAmelCase_ : str = text_generator('''This is a test''' ) self.assertEqual(__magic_name__ , [{'''generated_text''': ANY(__magic_name__ )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) UpperCAmelCase_ : Optional[int] = text_generator('''This is a test''' , return_full_text=__magic_name__ ) self.assertEqual(__magic_name__ , [{'''generated_text''': ANY(__magic_name__ )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) UpperCAmelCase_ : Any = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__magic_name__ ) self.assertEqual( __magic_name__ , [ [{'''generated_text''': ANY(__magic_name__ )}, {'''generated_text''': ANY(__magic_name__ )}], [{'''generated_text''': ANY(__magic_name__ )}, {'''generated_text''': ANY(__magic_name__ )}], ] , ) if text_generator.tokenizer.pad_token is not None: UpperCAmelCase_ : str = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__magic_name__ ) self.assertEqual( __magic_name__ , [ [{'''generated_text''': ANY(__magic_name__ )}, {'''generated_text''': ANY(__magic_name__ )}], [{'''generated_text''': ANY(__magic_name__ )}, {'''generated_text''': ANY(__magic_name__ )}], ] , ) with self.assertRaises(__magic_name__ ): UpperCAmelCase_ : Union[str, Any] = text_generator('''test''' , return_full_text=__magic_name__ , return_text=__magic_name__ ) with self.assertRaises(__magic_name__ ): UpperCAmelCase_ : List[str] = text_generator('''test''' , return_full_text=__magic_name__ , return_tensors=__magic_name__ ) with self.assertRaises(__magic_name__ ): UpperCAmelCase_ : List[Any] = text_generator('''test''' , return_text=__magic_name__ , return_tensors=__magic_name__ ) # Empty prompt is slighly special # it requires BOS 
token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): UpperCAmelCase_ : Union[str, Any] = text_generator('''''' ) self.assertEqual(__magic_name__ , [{'''generated_text''': ANY(__magic_name__ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): UpperCAmelCase_ : Any = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. UpperCAmelCase_ : Tuple = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_00_00 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 5_00 , max_new_tokens=20 ) UpperCAmelCase_ : int = text_generator('''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__magic_name__ ): text_generator( '''This is a test''' * 5_00 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" import torch # Classic `model_kwargs` UpperCAmelCase_ : Union[str, Any] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) UpperCAmelCase_ : Dict = pipe('''This is a test''' ) self.assertEqual( __magic_name__ , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
UpperCAmelCase_ : List[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) UpperCAmelCase_ : Any = pipe('''This is a test''' ) self.assertEqual( __magic_name__ , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 UpperCAmelCase_ : List[Any] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) UpperCAmelCase_ : Optional[int] = pipe('''This is a test''' ) self.assertEqual( __magic_name__ , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" import torch UpperCAmelCase_ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" import torch UpperCAmelCase_ : Tuple = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__magic_name__ , top_p=0.5 ) def UpperCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = '''Hello world''' UpperCAmelCase_ : Optional[int] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": UpperCAmelCase_ : str = logging.get_logger('''transformers.generation.tf_utils''' ) else: UpperCAmelCase_ : Optional[int] = logging.get_logger('''transformers.generation.utils''' ) UpperCAmelCase_ : str = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__magic_name__ ) as cl: UpperCAmelCase_ : int = text_generator(__magic_name__ , max_length=10 , max_new_tokens=1 ) self.assertIn(__magic_name__ , cl.out ) # The user only sets one -> no warning with CaptureLogger(__magic_name__ ) as cl: UpperCAmelCase_ : Any = text_generator(__magic_name__ , max_new_tokens=1 ) self.assertNotIn(__magic_name__ , cl.out ) with CaptureLogger(__magic_name__ ) as cl: UpperCAmelCase_ : Tuple = text_generator(__magic_name__ , max_length=10 ) self.assertNotIn(__magic_name__ , cl.out )
644
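A minimal sketch of the pipeline call pattern these tests exercise; the tiny checkpoint name is taken from the tests themselves, and running this needs network access to download it:

from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
out = generator("Hello I believe in", do_sample=False, max_new_tokens=5)
print(out[0]["generated_text"])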
'''simple docstring'''
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    """Return the midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    """Draw the triangle outline, then recurse on the three corner sub-triangles."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
644
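Each recursion level above subdivides the triangle through edge midpoints. A quick check of the midpoint arithmetic on the seed triangle's base edge (helper redefined here so the snippet runs standalone):

def get_mid(p1, p2):
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2

print(get_mid((-175, -125), (175, -125)))  # (0.0, -125.0)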
1
'''simple docstring'''
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Primality check using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return n together with all of its left and right truncations."""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: for numbers longer than 3 digits, both 3-digit ends must be prime."""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first `count` primes that stay prime under every truncation."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Project Euler 37: the sum of the only eleven truncatable primes."""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
644
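A worked example of the truncation list for 3797, one of the eleven truncatable primes the search finds: every left and right truncation is itself prime.

n = "3797"
truncs = [int(n)]
for i in range(1, len(n)):
    truncs += [int(n[i:]), int(n[:-i])]
print(truncs)  # [3797, 797, 379, 97, 37, 7, 3] -- all prime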
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case_ : List[str] = False class __a (unittest.TestCase ): pass @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = generator.manual_seed(0 ) UpperCAmelCase_ : Dict = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077''' UpperCAmelCase_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe.dual_guided( prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger ''' UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pipe.text_to_image( prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 
0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
style_context_codestyle: 644
label: 1
'''simple docstring'''
import argparse

import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
from omegaconf import OmegaConf  # the bare `import OmegaConf` in the original would fail


def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str) -> None:
    # names restored so the definition matches the call in the __main__ block below
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE, stripping the checkpoint prefix so diffusers can load it
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
code_codestyle: 644
'''simple docstring''' snake_case_ : int = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
style_context_codestyle: 644
label: 1
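A brief usage sketch for the LDM conversion script at the top of this row (an editorial addition, not a dataset row). The CLI flags come straight from the script's own argparse block; the module filename and all paths are placeholders.

# All paths below are placeholders.
# Shell invocation (flags match the argparse definitions above):
#   python conversion_ldm_uncond.py --checkpoint_path model.ckpt \
#       --config_path config.yaml --output_path ./ldm_pipeline
# Direct call, with convert_ldm_original imported or defined in scope:
convert_ldm_original("model.ckpt", "config.yaml", "./ldm_pipeline")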
'''simple docstring'''
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)  # join the table's keys, not the input string
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 644
'''simple docstring''' import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __a (unittest.TestCase ): @property def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet UpperCAmelCase_ : Dict = KarrasVeScheduler() UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0] UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256''' UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = KarrasVeScheduler() UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
style_context_codestyle: 644
label: 1
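Since the volume converter in the row above compresses its arithmetic into a single return line, a short worked check may help (an editorial sketch, not a dataset row); each result follows directly from the from_/to factors in METRIC_CONVERSION, using the function name as restored above.

# result = value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
print(volume_conversion(4, "cubicmeter", "litre"))   # 4 * 1 * 1000 = 4000.0
print(volume_conversion(1, "litre", "gallon"))       # 1 * 0.001 * 264.172 ≈ 0.264172
print(volume_conversion(3, "gallon", "cubicmeter"))  # 3 * 0.00454 * 1 = 0.01362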
'''simple docstring''' import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging hf_logging.set_verbosity_info() hf_logging.enable_default_handler() hf_logging.enable_explicit_format() def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Optional[int] = None, ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = {} if train_file is not None: UpperCAmelCase_ : Optional[Any] = [train_file] if eval_file is not None: UpperCAmelCase_ : Any = [eval_file] if test_file is not None: UpperCAmelCase_ : int = [test_file] UpperCAmelCase_ : Dict = datasets.load_dataset('''csv''', data_files=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[str] = list(ds[list(files.keys() )[0]].features.keys() ) UpperCAmelCase_ : int = features_name.pop(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = list(set(ds[list(files.keys() )[0]][label_name] ) ) UpperCAmelCase_ : int = {label: i for i, label in enumerate(SCREAMING_SNAKE_CASE__ )} UpperCAmelCase_ : List[str] = tokenizer.model_input_names UpperCAmelCase_ : Union[str, Any] = {} if len(SCREAMING_SNAKE_CASE__ ) == 1: for k in files.keys(): UpperCAmelCase_ : int = ds[k].map( lambda SCREAMING_SNAKE_CASE__ : tokenizer.batch_encode_plus( example[features_name[0]], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__, padding='''max_length''' ), batched=SCREAMING_SNAKE_CASE__, ) elif len(SCREAMING_SNAKE_CASE__ ) == 2: for k in files.keys(): UpperCAmelCase_ : List[Any] = ds[k].map( lambda SCREAMING_SNAKE_CASE__ : tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]), truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__, padding='''max_length''', ), batched=SCREAMING_SNAKE_CASE__, ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: UpperCAmelCase_ : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names} UpperCAmelCase_ : Dict = labelaid[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: UpperCAmelCase_ : Dict = {k: v for k, v in ex.items() if k in input_names} UpperCAmelCase_ : List[Any] = labelaid[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: UpperCAmelCase_ : List[str] = {k: v for k, v in ex.items() if k in input_names} UpperCAmelCase_ : Optional[int] = labelaid[ex[label_name]] yield (d, label) UpperCAmelCase_ : str = ( tf.data.Dataset.from_generator( SCREAMING_SNAKE_CASE__, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: UpperCAmelCase_ : str = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) ) UpperCAmelCase_ : Dict = ( tf.data.Dataset.from_generator( SCREAMING_SNAKE_CASE__, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: UpperCAmelCase_ : List[Any] = 
val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) ) UpperCAmelCase_ : List[str] = ( tf.data.Dataset.from_generator( SCREAMING_SNAKE_CASE__, ({k: tf.intaa for k in input_names}, tf.intaa), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: UpperCAmelCase_ : Optional[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) ) return train_ds, val_ds, test_ds, labelaid snake_case_ : Optional[Any] = logging.getLogger(__name__) @dataclass class __a : __a : int = field(metadata={"help": "Which column contains the label"} ) __a : str = field(default=lowerCamelCase , metadata={"help": "The path of the training file"} ) __a : Optional[str] = field(default=lowerCamelCase , metadata={"help": "The path of the development file"} ) __a : Optional[str] = field(default=lowerCamelCase , metadata={"help": "The path of the test file"} ) __a : int = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) __a : bool = field( default=lowerCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) @dataclass class __a : __a : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) __a : bool = field(default=lowerCamelCase , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. __a : Optional[str] = field( default=lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) def lowerCamelCase_ ( ) -> Dict: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCAmelCase_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) logger.info( F"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """ F"""16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = get_tfds( train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=SCREAMING_SNAKE_CASE__, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, ) UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(SCREAMING_SNAKE_CASE__ ), labelaid=SCREAMING_SNAKE_CASE__, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='''text-classification''', cache_dir=model_args.cache_dir, ) with training_args.strategy.scope(): UpperCAmelCase_ : str = TFAutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_pt=bool('''.bin''' in model_args.model_name_or_path ), config=SCREAMING_SNAKE_CASE__, cache_dir=model_args.cache_dir, ) def compute_metrics(SCREAMING_SNAKE_CASE__ : EvalPrediction ) -> Dict: UpperCAmelCase_ : List[Any] = np.argmax(p.predictions, axis=1 ) return {"acc": (preds == p.label_ids).mean()} # Initialize our Trainer UpperCAmelCase_ : List[Any] = TFTrainer( model=SCREAMING_SNAKE_CASE__, args=SCREAMING_SNAKE_CASE__, train_dataset=SCREAMING_SNAKE_CASE__, eval_dataset=SCREAMING_SNAKE_CASE__, compute_metrics=SCREAMING_SNAKE_CASE__, ) # Training if training_args.do_train: trainer.train() trainer.save_model() tokenizer.save_pretrained(training_args.output_dir ) # Evaluation UpperCAmelCase_ : Optional[int] = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) UpperCAmelCase_ : Tuple = trainer.evaluate() UpperCAmelCase_ : int = os.path.join(training_args.output_dir, '''eval_results.txt''' ) with open(SCREAMING_SNAKE_CASE__, '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(F""" {key} = {value}""" ) writer.write(F"""{key} = {value}\n""" ) results.update(SCREAMING_SNAKE_CASE__ ) return results if __name__ == "__main__": main()
code_codestyle: 644
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


# class, attribute, and method names restored to the standard PipelineTool interface
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
style_context_codestyle: 644
label: 1
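A hedged sketch of driving the transcription tool directly above (editorial addition). It assumes PipelineTool's standard setup/encode/forward/decode interface and feeds one second of synthetic silence, so the exact transcript is irrelevant.

import numpy as np

tool = SpeechToTextTool()
tool.setup()  # instantiates the WhisperProcessor and generation model
audio = np.zeros(16_000, dtype=np.float32)  # 1 s of silence at Whisper's assumed 16 kHz rate
features = tool.encode(audio)
tokens = tool.forward(features)
print(tool.decode(tokens))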
'''simple docstring''' from sklearn.metrics import fa_score import datasets snake_case_ : Union[str, Any] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" snake_case_ : Dict = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" snake_case_ : Any = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a (datasets.Metric ): def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ), '''references''': datasets.Sequence(datasets.Value('''int32''' ) ), } if self.config_name == '''multilabel''' else { '''predictions''': datasets.Value('''int32''' ), '''references''': datasets.Value('''int32''' ), } ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=1 , __magic_name__ : Tuple="binary" , __magic_name__ : List[Any]=None ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = fa_score( __magic_name__ , __magic_name__ , labels=__magic_name__ , pos_label=__magic_name__ , average=__magic_name__ , sample_weight=__magic_name__ ) return {"f1": float(__magic_name__ ) if score.size == 1 else score}
code_codestyle: 644
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_b = int(nums[1])
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a, num_b)}"
        )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a, num_b)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
style_context_codestyle: 644
label: 1
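Two quick sanity checks for the pair of gcd implementations above (editorial addition); the expected values are computed by hand.

assert greatest_common_divisor(121, 11) == 11
assert gcd_by_iterative(24, 40) == 8
# Both handle a negative operand via abs():
assert greatest_common_divisor(-3, 9) == gcd_by_iterative(-3, 9) == 3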
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging snake_case_ : Union[str, Any] = logging.get_logger(__name__) snake_case_ : Union[str, Any] = { "facebook/data2vec-vision-base-ft": ( "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json" ), } class __a (lowerCamelCase ): __a : List[Any] = "data2vec-vision" def __init__( self : List[Any] , __magic_name__ : Dict=7_68 , __magic_name__ : int=12 , __magic_name__ : Dict=12 , __magic_name__ : List[str]=30_72 , __magic_name__ : Dict="gelu" , __magic_name__ : Dict=0.0 , __magic_name__ : str=0.0 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Tuple=1E-12 , __magic_name__ : List[Any]=2_24 , __magic_name__ : Any=16 , __magic_name__ : Union[str, Any]=3 , __magic_name__ : Optional[int]=False , __magic_name__ : Union[str, Any]=False , __magic_name__ : Dict=False , __magic_name__ : int=False , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Dict=True , __magic_name__ : Any=[3, 5, 7, 11] , __magic_name__ : Any=[1, 2, 3, 6] , __magic_name__ : int=True , __magic_name__ : List[str]=0.4 , __magic_name__ : Any=2_56 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : List[str]=False , __magic_name__ : List[str]=2_55 , **__magic_name__ : int , ) -> Union[str, Any]: """simple docstring""" super().__init__(**__magic_name__ ) UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : Optional[Any] = num_hidden_layers UpperCAmelCase_ : str = num_attention_heads UpperCAmelCase_ : str = intermediate_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : Optional[Any] = initializer_range UpperCAmelCase_ : Dict = layer_norm_eps UpperCAmelCase_ : Optional[int] = image_size UpperCAmelCase_ : int = patch_size UpperCAmelCase_ : Union[str, Any] = num_channels UpperCAmelCase_ : List[Any] = use_mask_token UpperCAmelCase_ : List[Any] = use_absolute_position_embeddings UpperCAmelCase_ : str = use_relative_position_bias UpperCAmelCase_ : Tuple = use_shared_relative_position_bias UpperCAmelCase_ : Tuple = layer_scale_init_value UpperCAmelCase_ : Union[str, Any] = drop_path_rate UpperCAmelCase_ : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) UpperCAmelCase_ : Optional[int] = out_indices UpperCAmelCase_ : List[Any] = pool_scales # auxiliary head attributes (semantic segmentation) UpperCAmelCase_ : str = use_auxiliary_head UpperCAmelCase_ : List[Any] = auxiliary_loss_weight UpperCAmelCase_ : Dict = auxiliary_channels UpperCAmelCase_ : str = auxiliary_num_convs UpperCAmelCase_ : Union[str, Any] = auxiliary_concat_input UpperCAmelCase_ : List[Any] = semantic_loss_ignore_index class __a (lowerCamelCase ): __a : Union[str, Any] = version.parse("1.11" ) @property def UpperCAmelCase__ ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : List[Any] ) -> float: """simple docstring""" return 1E-4
code_codestyle: 644
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : int , __magic_name__ : Optional[Any] , __magic_name__ : Any=13 , __magic_name__ : Any=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : str=True , __magic_name__ : Optional[int]=True , __magic_name__ : List[Any]=99 , __magic_name__ : int=24 , __magic_name__ : Optional[int]=2 , __magic_name__ : Tuple=6 , __magic_name__ : Union[str, Any]=37 , __magic_name__ : Optional[Any]="gelu" , __magic_name__ : Any=0.1 , __magic_name__ : str=0.1 , __magic_name__ : Tuple=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : Tuple=2 , __magic_name__ : Tuple=0.0_2 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Optional[int]=None , __magic_name__ : Any=10_00 , ) -> str: """simple docstring""" UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : List[str] = seq_length UpperCAmelCase_ : Dict = is_training UpperCAmelCase_ : List[str] = use_input_mask UpperCAmelCase_ : Any = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Any = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Tuple = num_hidden_layers UpperCAmelCase_ : Tuple = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : Optional[int] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Union[str, Any] = max_position_embeddings UpperCAmelCase_ : int = type_vocab_size UpperCAmelCase_ : List[Any] = type_sequence_label_size UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = num_labels UpperCAmelCase_ : List[str] = scope UpperCAmelCase_ : List[str] = range_bbox def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase_ : List[str] = bbox[i, j, 3] UpperCAmelCase_ : Dict = bbox[i, j, 1] UpperCAmelCase_ : Optional[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase_ : List[str] = bbox[i, j, 2] UpperCAmelCase_ : Tuple = bbox[i, j, 0] UpperCAmelCase_ : Union[str, Any] = t UpperCAmelCase_ : int = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Dict = None UpperCAmelCase_ : Tuple = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) 
UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : int , ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = LiltModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : List[Any] = model(__magic_name__ , bbox=__magic_name__ , token_type_ids=__magic_name__ ) UpperCAmelCase_ : Optional[int] = model(__magic_name__ , bbox=__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[Any] , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = LiltForTokenClassification(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Optional[int] , __magic_name__ : int , __magic_name__ : Tuple , __magic_name__ : Any , ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : str = LiltForQuestionAnswering(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Optional[Any] = model( __magic_name__ , bbox=__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , start_positions=__magic_name__ , end_positions=__magic_name__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Tuple = { '''input_ids''': input_ids, 
'''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Tuple = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) __a : Any = ( { "feature-extraction": LiltModel, "question-answering": LiltForQuestionAnswering, "text-classification": LiltForSequenceClassification, "token-classification": LiltForTokenClassification, "zero-shot": LiltForSequenceClassification, } if is_torch_available() else {} ) __a : Union[str, Any] = False __a : int = False def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : int ) -> str: """simple docstring""" return True def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = LiltModelTester(self ) UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : str ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : Tuple = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__magic_name__ ) @slow def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = LiltModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) @require_torch @slow class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : str = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(__magic_name__ ) UpperCAmelCase_ : Any = torch.tensor([[1, 2]] , device=__magic_name__ ) UpperCAmelCase_ : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=__magic_name__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Optional[int] = model(input_ids=__magic_name__ , bbox=__magic_name__ ) UpperCAmelCase_ : int = torch.Size([1, 2, 7_68] ) UpperCAmelCase_ : List[str] = torch.tensor( [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=__magic_name__ , ) self.assertTrue(outputs.last_hidden_state.shape , __magic_name__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , __magic_name__ , atol=1E-3 ) )
style_context_codestyle: 644
label: 1
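A minimal sketch instantiating the vision config earlier in this row (editorial addition). `Data2VecVisionConfig` is assumed to be the public transformers name for the `data2vec-vision` model type, and the printed defaults are read off the __init__ signature above.

from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig()
print(config.hidden_size, config.num_hidden_layers, config.image_size)  # 768 12 224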
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey


def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float32)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
code_codestyle: 644
'''simple docstring''' import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging snake_case_ : str = logging.get_logger(__name__) snake_case_ : int = "▁" snake_case_ : str = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} snake_case_ : int = { "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } snake_case_ : Optional[Any] = { "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } snake_case_ : Dict = { "ernie-m-base": 5_14, "ernie-m-large": 5_14, } snake_case_ : Any = { "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class __a (lowerCamelCase ): __a : List[str] = ["input_ids"] __a : Union[str, Any] = VOCAB_FILES_NAMES __a : Tuple = PRETRAINED_INIT_CONFIGURATION __a : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __a : Union[str, Any] = RESOURCE_FILES_NAMES def __init__( self : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : int=None , __magic_name__ : str=False , __magic_name__ : int="utf8" , __magic_name__ : Optional[int]="[UNK]" , __magic_name__ : Dict="[SEP]" , __magic_name__ : List[Any]="[PAD]" , __magic_name__ : str="[CLS]" , __magic_name__ : Optional[int]="[MASK]" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : Union[str, Any] , ) -> None: """simple docstring""" # Mask token behave like a normal word, i.e. include the space before it and # is included in the raw text, there should be a match in a non-normalized sentence. 
UpperCAmelCase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , vocab_file=__magic_name__ , encoding=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , ) UpperCAmelCase_ : Optional[Any] = do_lower_case UpperCAmelCase_ : List[str] = sentencepiece_model_ckpt UpperCAmelCase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__magic_name__ ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: UpperCAmelCase_ : List[Any] = self.load_vocab(filepath=__magic_name__ ) else: UpperCAmelCase_ : str = {self.sp_model.id_to_piece(__magic_name__ ): id for id in range(self.sp_model.get_piece_size() )} UpperCAmelCase_ : int = {v: k for k, v in self.vocab.items()} def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any ) -> Any: """simple docstring""" if text is None: return None UpperCAmelCase_ : str = self.tokenize(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : str = '''''', [] for i, ch in enumerate(__magic_name__ ): if ch in self.SP_CHAR_MAPPING: UpperCAmelCase_ : Optional[int] = self.SP_CHAR_MAPPING.get(__magic_name__ ) else: UpperCAmelCase_ : Union[str, Any] = unicodedata.normalize('''NFKC''' , __magic_name__ ) if self.is_whitespace(__magic_name__ ): continue normalized_text += ch char_mapping.extend([i] * len(__magic_name__ ) ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = normalized_text, [], 0 if self.do_lower_case: UpperCAmelCase_ : Optional[int] = text.lower() for token in split_tokens: if token[:1] == "▁": UpperCAmelCase_ : Tuple = token[1:] UpperCAmelCase_ : int = text[offset:].index(__magic_name__ ) + offset UpperCAmelCase_ : Optional[int] = start + len(__magic_name__ ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) UpperCAmelCase_ : int = end return token_mapping @property def UpperCAmelCase__ ( self : Any ) -> Any: """simple docstring""" return len(self.vocab ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" return dict(self.vocab , **self.added_tokens_encoder ) def __getstate__( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.__dict__.copy() UpperCAmelCase_ : Optional[Any] = None return state def __setstate__( self : str , __magic_name__ : Any ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): UpperCAmelCase_ : int = {} UpperCAmelCase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Any ) -> List[str]: """simple docstring""" return "".join((self.SP_CHAR_MAPPING.get(__magic_name__ , __magic_name__ ) for c in text) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple , __magic_name__ : Any=False , __magic_name__ : List[str]=64 , __magic_name__ : List[str]=0.1 ) -> List[str]: """simple docstring""" if self.sp_model_kwargs.get('''enable_sampling''' ) is True: UpperCAmelCase_ : Dict = True if self.sp_model_kwargs.get('''alpha''' ) is not None: UpperCAmelCase_ : Union[str, Any] = self.sp_model_kwargs.get('''alpha''' ) if self.sp_model_kwargs.get('''nbest_size''' ) is not None: UpperCAmelCase_ : Any = self.sp_model_kwargs.get('''nbest_size''' 
) if not enable_sampling: UpperCAmelCase_ : Dict = self.sp_model.EncodeAsPieces(__magic_name__ ) else: UpperCAmelCase_ : Dict = self.sp_model.SampleEncodeAsPieces(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : List[Any] = [] for pi, piece in enumerate(__magic_name__ ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__magic_name__ ) and pi != 0: new_pieces.append(__magic_name__ ) continue else: continue UpperCAmelCase_ : List[str] = 0 for i, chunk in enumerate(__magic_name__ ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__magic_name__ ) or self.is_punct(__magic_name__ ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__magic_name__ ) UpperCAmelCase_ : List[Any] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : List[str] = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) UpperCAmelCase_ : str = i if len(__magic_name__ ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Optional[int] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = self.convert_ids_to_tokens(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ''''''.join(__magic_name__ ).replace(__magic_name__ , ''' ''' ).strip() return out_string def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[Any] ) -> List[Any]: """simple docstring""" return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return self.reverse_vocab.get(__magic_name__ , self.unk_token ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any , __magic_name__ : Union[str, Any]=None ) -> Any: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : Union[str, Any] = [self.cls_token_id] UpperCAmelCase_ : List[Any] = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None ) -> int: """simple docstring""" if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[str]=None , __magic_name__ : Optional[Any]=False ) -> Optional[int]: """simple docstring""" if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1] def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ 
: Optional[List[int]] = None ) -> List[int]: """simple docstring""" # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method if token_ids_a is None: # [CLS] X [SEP] return (len(__magic_name__ ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__magic_name__ ) + 1) + [1] * (len(__magic_name__ ) + 3) def UpperCAmelCase__ ( self : Dict , __magic_name__ : str ) -> Tuple: """simple docstring""" if "\u4e00" <= char <= "\u9fff": return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] ) -> str: """simple docstring""" if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[Any] ) -> Dict: """simple docstring""" if char in ",;:.?!~,;:。?!《》【】": return True return False def UpperCAmelCase__ ( self : Tuple , __magic_name__ : Any ) -> Union[str, Any]: """simple docstring""" if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__magic_name__ ) == 1: UpperCAmelCase_ : Optional[Any] = unicodedata.category(__magic_name__ ) if cat == "Zs": return True return False def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Tuple ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = {} with io.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(__magic_name__ ): UpperCAmelCase_ : List[Any] = line.rstrip('''\n''' ) UpperCAmelCase_ : Dict = int(__magic_name__ ) return token_to_idx def UpperCAmelCase__ ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = 0 if os.path.isdir(__magic_name__ ): UpperCAmelCase_ : Any = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) else: UpperCAmelCase_ : List[str] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer: for token, token_index in sorted(self.vocab.items() , key=lambda __magic_name__ : kv[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ''' Please check that the vocabulary is not corrupted!''' ) UpperCAmelCase_ : Dict = token_index writer.write(token + '''\n''' ) index += 1 UpperCAmelCase_ : Union[str, Any] = os.path.join(__magic_name__ , '''sentencepiece.bpe.model''' ) with open(__magic_name__ , '''wb''' ) as fi: UpperCAmelCase_ : Optional[int] = self.sp_model.serialized_model_proto() fi.write(__magic_name__ ) return (vocab_file,)
style_context_codestyle: 644
label: 1
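A small check of the Gabor kernel builder earlier in this row (editorial sketch): even sizes are bumped to the next odd value, and the kernel peaks at exactly 1 in the center when psi is 0.

kernel = gabor_filter_kernel(20, 8, 45, 10, 0.5, 0)  # even ksize is bumped to 21
print(kernel.shape)  # (21, 21)
print(kernel.max())  # 1.0 at the center, since exp(0) * cos(0) = 1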
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() snake_case_ : Tuple = logging.get_logger(__name__) snake_case_ : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } snake_case_ : List[str] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any: UpperCAmelCase_ : Any = {} with open(SCREAMING_SNAKE_CASE__, '''r''' ) as file: for line_number, line in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Dict = line.strip() if line: UpperCAmelCase_ : Tuple = line.split() UpperCAmelCase_ : Any = line_number UpperCAmelCase_ : Any = words[0] UpperCAmelCase_ : Any = value return result def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : int ) -> List[Any]: for attribute in key.split('''.''' ): UpperCAmelCase_ : int = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : List[Any] = PARAM_MAPPING[full_name.split('''.''' )[-1]] UpperCAmelCase_ : str = '''param''' if weight_type is not None and weight_type != "param": UpperCAmelCase_ : str = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ).shape elif weight_type is not None and weight_type == "param": UpperCAmelCase_ : Tuple = hf_pointer for attribute in hf_param_name.split('''.''' ): UpperCAmelCase_ : Optional[int] = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[str] = shape_pointer.shape # let's reduce dimension UpperCAmelCase_ : str = value[0] else: UpperCAmelCase_ : Optional[Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCAmelCase_ : int = value elif weight_type == "weight_g": UpperCAmelCase_ : str = value elif weight_type == "weight_v": UpperCAmelCase_ : List[Any] = value elif weight_type == "bias": UpperCAmelCase_ : List[str] = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): UpperCAmelCase_ : Dict = getattr(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = value else: UpperCAmelCase_ : Optional[int] = value logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : str ) -> int: UpperCAmelCase_ : Union[str, Any] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Tuple = PARAM_MAPPING[full_name.split('''.''' )[-1]] UpperCAmelCase_ : Optional[int] = '''param''' if weight_type is not None and weight_type != "param": UpperCAmelCase_ : Optional[Any] = '''.'''.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": UpperCAmelCase_ : Union[str, Any] = '''.'''.join([key, hf_param_name] ) else: UpperCAmelCase_ : str = key UpperCAmelCase_ : Any = value if '''lm_head''' in full_key else value[0] snake_case_ : int = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any=None, SCREAMING_SNAKE_CASE__ : Dict=None ) -> Optional[int]: UpperCAmelCase_ : Tuple = False for key, mapped_key in MAPPING.items(): UpperCAmelCase_ : Union[str, Any] = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: UpperCAmelCase_ : List[Any] = True if "*" in mapped_key: UpperCAmelCase_ : List[Any] = name.split(SCREAMING_SNAKE_CASE__ )[0].split('''.''' )[-2] UpperCAmelCase_ : Dict = mapped_key.replace('''*''', SCREAMING_SNAKE_CASE__ ) if "weight_g" in name: UpperCAmelCase_ : List[str] = '''weight_g''' elif "weight_v" in name: UpperCAmelCase_ : Optional[Any] = '''weight_v''' elif "bias" in name: UpperCAmelCase_ : Union[str, Any] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCAmelCase_ : Optional[Any] = '''weight''' else: UpperCAmelCase_ : Optional[Any] = None if hf_dict is not None: rename_dict(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) else: set_recursively(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) return is_used return is_used def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]: UpperCAmelCase_ : str = [] UpperCAmelCase_ : Dict = fairseq_model.state_dict() UpperCAmelCase_ : List[str] = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): UpperCAmelCase_ : int = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, 
SCREAMING_SNAKE_CASE__, hf_model.config.feat_extract_norm == '''group''', ) UpperCAmelCase_ : Any = True else: UpperCAmelCase_ : List[str] = load_wavaveca_layer(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = full_name.split('''conv_layers.''' )[-1] UpperCAmelCase_ : int = name.split('''.''' ) UpperCAmelCase_ : Any = int(items[0] ) UpperCAmelCase_ : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCAmelCase_ : Optional[Any] = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCAmelCase_ : Dict = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCAmelCase_ : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCAmelCase_ : str = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Optional[Any]=None, SCREAMING_SNAKE_CASE__ : Tuple=None, SCREAMING_SNAKE_CASE__ : List[str]=True, SCREAMING_SNAKE_CASE__ : List[Any]=False ) -> str: if config_path is not None: UpperCAmelCase_ : Optional[int] = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase_ : str = WavaVecaConfig() if is_seq_class: UpperCAmelCase_ : List[str] = read_txt_into_dict(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Tuple = idalabel UpperCAmelCase_ : int = WavaVecaForSequenceClassification(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=SCREAMING_SNAKE_CASE__, return_attention_mask=SCREAMING_SNAKE_CASE__, ) feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ ) elif is_finetuned: if dict_path: UpperCAmelCase_ : Any = Dictionary.load(SCREAMING_SNAKE_CASE__ ) # important change bos & pad token id since CTC symbol is 
<pad> and # not <s> as in fairseq UpperCAmelCase_ : Optional[int] = target_dict.pad_index UpperCAmelCase_ : Optional[Any] = target_dict.bos_index UpperCAmelCase_ : Tuple = target_dict.eos_index UpperCAmelCase_ : Dict = len(target_dict.symbols ) UpperCAmelCase_ : Optional[int] = os.path.join(SCREAMING_SNAKE_CASE__, '''vocab.json''' ) if not os.path.isdir(SCREAMING_SNAKE_CASE__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE__ ) ) return os.makedirs(SCREAMING_SNAKE_CASE__, exist_ok=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[str] = target_dict.indices # fairseq has the <pad> and <s> switched UpperCAmelCase_ : Optional[int] = 0 UpperCAmelCase_ : List[str] = 1 with open(SCREAMING_SNAKE_CASE__, '''w''', encoding='''utf-8''' ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Dict = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE__, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : List[str] = True if config.feat_extract_norm == '''layer''' else False UpperCAmelCase_ : int = WavaVecaFeatureExtractor( feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=SCREAMING_SNAKE_CASE__, return_attention_mask=SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : Optional[Any] = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__, tokenizer=SCREAMING_SNAKE_CASE__ ) processor.save_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[Any] = WavaVecaForCTC(SCREAMING_SNAKE_CASE__ ) else: UpperCAmelCase_ : Optional[int] = WavaVecaForPreTraining(SCREAMING_SNAKE_CASE__ ) if is_finetuned or is_seq_class: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: UpperCAmelCase_ : Union[str, Any] = argparse.Namespace(task='''audio_pretraining''' ) UpperCAmelCase_ : List[str] = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": snake_case_ : Any = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) snake_case_ : int = parser.parse_args() snake_case_ : List[str] = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, 
args.is_seq_class, )
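# Illustrative sketch (not part of the conversion script above): the script copies
# fairseq tensors into the HF model by walking dotted key paths with getattr, the
# same idea as its recursive weight-setting helper. All names below are hypothetical.
import torch
from torch import nn

def set_by_dotted_path(root: nn.Module, dotted_key: str, tensor: torch.Tensor) -> None:
    *parents, leaf = dotted_key.split(".")
    obj = root
    for name in parents:
        obj = getattr(obj, name)  # descend one attribute per path segment
    if getattr(obj, leaf).data.shape != tensor.shape:
        raise ValueError(f"shape mismatch for {dotted_key}")
    getattr(obj, leaf).data = tensor

class TinyProjector(nn.Module):
    def __init__(self):
        super().__init__()
        self.projection = nn.Linear(2, 2)

model = TinyProjector()
set_by_dotted_path(model, "projection.weight", torch.zeros(2, 2))
assert torch.equal(model.projection.weight.data, torch.zeros(2, 2))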
'''simple docstring''' def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> str: if number > 0: raise ValueError('''input must be a negative integer''' ) UpperCAmelCase_ : Union[str, Any] = len(bin(SCREAMING_SNAKE_CASE__ )[3:] ) UpperCAmelCase_ : Union[str, Any] = bin(abs(SCREAMING_SNAKE_CASE__ ) - (1 << binary_number_length) )[3:] UpperCAmelCase_ : Optional[Any] = ( ( '''1''' + '''0''' * (binary_number_length - len(SCREAMING_SNAKE_CASE__ )) + twos_complement_number ) if number < 0 else '''0''' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
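# Standalone sanity check of the identity the converter above relies on: the
# two's complement of a negative n, written one bit wider than |n|, is the plain
# binary representation of 2**k + n.
for n in (-1, -5, -17, -207):
    k = (-n).bit_length() + 1
    # e.g. n = -5: k = 4 and 2**4 - 5 = 11, i.e. '0b1011'
    print(n, "->", bin(2**k + n))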
'''simple docstring''' import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging snake_case_ : Optional[Any] = logging.get_logger(__name__) snake_case_ : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED snake_case_ : Optional[Any] = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } snake_case_ : Union[str, Any] = { "allenai/led-base-16384": 1_63_84, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowerCamelCase_ ( ) -> List[Any]: UpperCAmelCase_ : List[str] = ( list(range(ord('''!''' ), ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ), ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ), ord('''ÿ''' ) + 1 ) ) ) UpperCAmelCase_ : List[Any] = bs[:] UpperCAmelCase_ : List[Any] = 0 for b in range(2**8 ): if b not in bs: bs.append(SCREAMING_SNAKE_CASE__ ) cs.append(2**8 + n ) n += 1 UpperCAmelCase_ : Dict = [chr(SCREAMING_SNAKE_CASE__ ) for n in cs] return dict(zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) ) def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = set() UpperCAmelCase_ : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase_ : List[str] = char return pairs class __a (lowerCamelCase ): __a : Tuple = VOCAB_FILES_NAMES __a : int = PRETRAINED_VOCAB_FILES_MAP __a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a : str = ["input_ids", "attention_mask"] def __init__( self : Dict , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : str="replace" , __magic_name__ : Any="<s>" , __magic_name__ : int="</s>" , __magic_name__ : Tuple="</s>" , __magic_name__ : List[str]="<s>" , __magic_name__ : List[Any]="<unk>" , __magic_name__ : Tuple="<pad>" , __magic_name__ : str="<mask>" , __magic_name__ : Optional[Any]=False , **__magic_name__ : int , ) -> int: """simple docstring""" UpperCAmelCase_ : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else bos_token UpperCAmelCase_ : int = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token UpperCAmelCase_ : List[Any] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else sep_token UpperCAmelCase_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else cls_token UpperCAmelCase_ : Optional[int] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token UpperCAmelCase_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase_ : List[str] = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token super().__init__( errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , **__magic_name__ , ) with open(__magic_name__ , encoding='''utf-8''' ) as vocab_handle: UpperCAmelCase_ : Optional[Any] = json.load(__magic_name__ ) UpperCAmelCase_ : int = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : int = errors # how to handle errors in decoding UpperCAmelCase_ : Tuple = bytes_to_unicode() UpperCAmelCase_ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__magic_name__ , encoding='''utf-8''' ) as merges_handle: UpperCAmelCase_ : Any = merges_handle.read().split('''\n''' )[1:-1] UpperCAmelCase_ : str = [tuple(merge.split() ) for merge in bpe_merges] UpperCAmelCase_ : int = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : List[Any] = {} UpperCAmelCase_ : Tuple = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions UpperCAmelCase_ : Dict = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def UpperCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" return len(self.encoder ) def UpperCAmelCase__ ( self : List[Any] ) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[str] ) -> Tuple: """simple docstring""" if token in self.cache: return self.cache[token] UpperCAmelCase_ : List[str] = tuple(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = get_pairs(__magic_name__ ) if not pairs: return token while True: UpperCAmelCase_ : str = min(__magic_name__ , key=lambda __magic_name__ : self.bpe_ranks.get(__magic_name__ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = bigram UpperCAmelCase_ : List[str] = [] UpperCAmelCase_ : List[str] = 0 while i < len(__magic_name__ ): try: UpperCAmelCase_ : Tuple = word.index(__magic_name__ , __magic_name__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase_ : List[str] = j if word[i] == first and i < len(__magic_name__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase_ : Tuple = tuple(__magic_name__ ) UpperCAmelCase_ : Any = new_word if len(__magic_name__ ) == 1: break else: UpperCAmelCase_ : Union[str, Any] = get_pairs(__magic_name__ ) UpperCAmelCase_ : List[Any] = ''' '''.join(__magic_name__ ) UpperCAmelCase_ : Tuple = word return word def UpperCAmelCase__ ( self : int , __magic_name__ : List[str] ) -> Any: """simple docstring""" UpperCAmelCase_ : List[Any] = [] for token in re.findall(self.pat , __magic_name__ ): UpperCAmelCase_ : Union[str, Any] = ''''''.join( self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__magic_name__ ).split(''' ''' ) ) return 
bpe_tokens def UpperCAmelCase__ ( self : Any , __magic_name__ : Optional[int] ) -> List[str]: """simple docstring""" return self.encoder.get(__magic_name__ , self.encoder.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : int ) -> Any: """simple docstring""" return self.decoder.get(__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Any = ''''''.join(__magic_name__ ) UpperCAmelCase_ : str = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors ) return text def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__magic_name__ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase_ : Union[str, Any] = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : Optional[int] = os.path.join( __magic_name__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + '''\n''' ) UpperCAmelCase_ : Tuple = 0 with open(__magic_name__ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __magic_name__ : __magic_name__[1] ): if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) UpperCAmelCase_ : int = token_index writer.write(''' '''.join(bpe_tokens ) + '''\n''' ) index += 1 return vocab_file, merge_file def UpperCAmelCase__ ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : Any = [self.cls_token_id] UpperCAmelCase_ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase__ ( self : Any , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ ) if token_ids_a is None: return [1] + ([0] * len(__magic_name__ )) + [1] return [1] + ([0] * len(__magic_name__ )) + [1, 1] + ([0] * len(__magic_name__ )) + [1] def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" UpperCAmelCase_ : List[Any] = [self.sep_token_id] UpperCAmelCase_ : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase__ ( self : str , __magic_name__ : Union[str, Any] , __magic_name__ : str=False , **__magic_name__ : Any ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = kwargs.pop('''add_prefix_space''' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and
(len(__magic_name__ ) > 0 and not text[0].isspace()): UpperCAmelCase_ : Tuple = ''' ''' + text return (text, kwargs) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : Union[Dict[str, EncodedInput], BatchEncoding] , __magic_name__ : Optional[int] = None , __magic_name__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , ) -> dict: """simple docstring""" UpperCAmelCase_ : Any = super()._pad( encoded_inputs=__magic_name__ , max_length=__magic_name__ , padding_strategy=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=__magic_name__ , ) # Load from model defaults if return_attention_mask is None: UpperCAmelCase_ : int = '''attention_mask''' in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: UpperCAmelCase_ : int = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. UpperCAmelCase_ : List[str] = len(encoded_inputs['''global_attention_mask'''] ) != len(__magic_name__ ) if needs_to_be_padded: UpperCAmelCase_ : Union[str, Any] = len(__magic_name__ ) - len(encoded_inputs['''global_attention_mask'''] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` UpperCAmelCase_ : Union[str, Any] = ( encoded_inputs['''global_attention_mask'''] + [-1] * difference ) elif self.padding_side == "left": UpperCAmelCase_ : Any = [-1] * difference + encoded_inputs[ '''global_attention_mask''' ] else: raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) ) return encoded_inputs
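# Minimal sketch of the padding rule implemented in _pad above: LED's
# global_attention_mask is padded with -1, not 0, because 0 already means
# "local attention". The helper name is illustrative.
def pad_global_attention_mask(mask: list, target_len: int, padding_side: str = "right") -> list:
    diff = target_len - len(mask)
    return mask + [-1] * diff if padding_side == "right" else [-1] * diff + mask

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0, 0], 5, "left") == [-1, -1, 1, 0, 0]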
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]: """simple docstring""" # For consistency across different places the DisjunctiveConstraint is called, # dc.token_ids is a list of integers. It is also initialized only by integers. UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__magic_name__ ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" # We can't have constraints that are complete subsets of another. This leads to a preverse # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint? # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it). UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__magic_name__ ): DisjunctiveConstraint(__magic_name__ ) # fails here def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 ) UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 ) UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False self.assertTrue(__magic_name__ ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : int ) -> Dict: """simple docstring""" UpperCAmelCase_ : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] UpperCAmelCase_ : Tuple = DisjunctiveConstraint(__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
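# Standalone sketch of the advancement semantics the tests above exercise: keep
# every branch consistent with the tokens generated so far; the constraint is
# complete once any branch is fully matched. This mirrors, not replaces, the
# real DisjunctiveConstraint.
def advance(branches, seen, token):
    seen = seen + [token]
    live = [b for b in branches if b[: len(seen)] == seen]
    return seen, live, any(b == seen for b in live)

branches = [[1, 2, 3], [1, 2, 4]]
seen, live, done = [], branches, False
for tok in (1, 2, 3):
    seen, live, done = advance(branches, seen, tok)
assert done and live == [[1, 2, 3]]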
'''simple docstring''' import os from collections import deque import torch from torch.utils.data import Dataset class __a (lowerCamelCase ): def __init__( self : List[Any] , __magic_name__ : Any="" , __magic_name__ : Tuple="train" ) -> int: """simple docstring""" assert os.path.isdir(__magic_name__ ) UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Union[str, Any] = os.listdir(__magic_name__ ) for story_filename in story_filenames_list: if "summary" in story_filename: continue UpperCAmelCase_ : Dict = os.path.join(__magic_name__ , __magic_name__ ) if not os.path.isfile(__magic_name__ ): continue self.documents.append(__magic_name__ ) def __len__( self : List[str] ) -> Optional[Any]: """simple docstring""" return len(self.documents ) def __getitem__( self : Union[str, Any] , __magic_name__ : Dict ) -> str: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.documents[idx] UpperCAmelCase_ : str = document_path.split('''/''' )[-1] with open(__magic_name__ , encoding='''utf-8''' ) as source: UpperCAmelCase_ : str = source.read() UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = process_story(__magic_name__ ) return document_name, story_lines, summary_lines def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Tuple = list(filter(lambda SCREAMING_SNAKE_CASE__ : len(SCREAMING_SNAKE_CASE__ ) != 0, [line.strip() for line in raw_story.split('''\n''' )] ) ) # for some unknown reason some lines miss a period, add it UpperCAmelCase_ : int = [_add_missing_period(SCREAMING_SNAKE_CASE__ ) for line in nonempty_lines] # gather article lines UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Union[str, Any] = deque(SCREAMING_SNAKE_CASE__ ) while True: try: UpperCAmelCase_ : List[Any] = lines.popleft() if element.startswith('''@highlight''' ): break story_lines.append(SCREAMING_SNAKE_CASE__ ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines UpperCAmelCase_ : Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE__ : not t.startswith('''@highlight''' ), SCREAMING_SNAKE_CASE__ ) ) return story_lines, summary_lines def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')'''] if line.startswith('''@highlight''' ): return line if line[-1] in END_TOKENS: return line return line + "." 
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]: if len(SCREAMING_SNAKE_CASE__ ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(SCREAMING_SNAKE_CASE__ )) ) return sequence def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : List[str] ) -> Optional[Any]: UpperCAmelCase_ : Optional[Any] = torch.ones_like(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = sequence == pad_token_id UpperCAmelCase_ : Tuple = 0 return mask def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : str ) -> Dict: UpperCAmelCase_ : Tuple = [tokenizer.encode(SCREAMING_SNAKE_CASE__ ) for line in story_lines] UpperCAmelCase_ : Any = [token for sentence in story_lines_token_ids for token in sentence] UpperCAmelCase_ : List[str] = [tokenizer.encode(SCREAMING_SNAKE_CASE__ ) for line in summary_lines] UpperCAmelCase_ : str = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> List[Any]: UpperCAmelCase_ : Tuple = [] for sequence in batch: UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : Tuple = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(SCREAMING_SNAKE_CASE__ ) return torch.tensor(SCREAMING_SNAKE_CASE__ )
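# Standalone sketch of the two helpers above: fit a token sequence to a fixed
# block (truncate or pad) and build alternating sentence segment ids keyed on a
# separator token. The SEP/PAD ids are made up for the example.
SEP, PAD = 102, 0

def fit_block(seq, block, pad):
    return seq[:block] if len(seq) > block else seq + [pad] * (block - len(seq))

def segment_ids(seq, sep):
    out, sentence_num = [], -1
    for tok in seq:
        if tok == sep:
            sentence_num += 1
        out.append(sentence_num % 2)
    return out

assert fit_block([5, 6, 7], 5, PAD) == [5, 6, 7, 0, 0]
assert segment_ids([SEP, 9, SEP, 9], SEP) == [0, 0, 1, 1]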
'''simple docstring''' import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": snake_case_ : List[Any] = pd.read_csv("sample_data.csv", header=None) snake_case_ : Optional[Any] = df.shape[:1][0] # If you're using some other dataset input the target column snake_case_ : Any = df.iloc[:, 1:2] snake_case_ : str = actual_data.values.reshape(len_data, 1) snake_case_ : Optional[Any] = MinMaxScaler().fit_transform(actual_data) snake_case_ : List[str] = 10 snake_case_ : Any = 5 snake_case_ : Any = 20 snake_case_ : Tuple = len_data - periods * look_back snake_case_ : str = actual_data[:division] snake_case_ : Optional[int] = actual_data[division - look_back :] snake_case_ ,snake_case_ : Any = [], [] snake_case_ ,snake_case_ : Union[str, Any] = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) snake_case_ : Any = np.array(train_x) snake_case_ : Optional[Any] = np.array(test_x) snake_case_ : Optional[Any] = np.array([list(i.ravel()) for i in train_y]) snake_case_ : List[str] = np.array([list(i.ravel()) for i in test_y]) snake_case_ : List[Any] = Sequential() model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(1_28, 1))) model.add(Dense(forward_days)) model.compile(loss="mean_squared_error", optimizer="adam") snake_case_ : Dict = model.fit( x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4 ) snake_case_ : Optional[Any] = model.predict(x_test)
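# Standalone sketch of the windowing scheme used above: every sample is
# `look_back` consecutive points and every target the next `forward_days`
# points. The helper name is illustrative.
import numpy as np

def make_windows(series, look_back, forward_days):
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])
        ys.append(series[i + look_back : i + look_back + forward_days])
    return np.array(xs), np.array(ys)

x, y = make_windows(np.arange(10.0), look_back=4, forward_days=2)
assert x.shape == (5, 4) and y.shape == (5, 2)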
'''simple docstring''' import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str] ) -> str: UpperCAmelCase_ : str = [] for line in lines: UpperCAmelCase_ : Tuple = re.sub(R'''#.*''', '''''', SCREAMING_SNAKE_CASE__ ) # remove comments if line: filtered_lines.append(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[str] = '''\n'''.join(SCREAMING_SNAKE_CASE__ ) # Make a hash from all this code UpperCAmelCase_ : Dict = full_str.encode('''utf-8''' ) return shaaaa(SCREAMING_SNAKE_CASE__ ).hexdigest() # get importable module names and hash for caching snake_case_ : Union[str, Any] = { "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions snake_case_ : str = { ".csv": ("csv", {}), ".tsv": ("csv", {"sep": "\t"}), ".json": ("json", {}), ".jsonl": ("json", {}), ".parquet": ("parquet", {}), ".arrow": ("arrow", {}), ".txt": ("text", {}), } _EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) snake_case_ : List[Any] = {"imagefolder", "audiofolder"} # Used to filter data files based on extensions given a module name snake_case_ : Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append(".zip") _MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
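# Standalone sketch of the caching hash above: drop comments and blank lines,
# then sha256 what remains, so cosmetic edits to a packaged module do not
# invalidate cached datasets.
import re
from hashlib import sha256

def hash_lines(lines):
    kept = [re.sub(r"#.*", "", line) for line in lines]
    kept = [line for line in kept if line]
    return sha256("\n".join(kept).encode("utf-8")).hexdigest()

assert hash_lines(["x = 1  # set x", "", "y = 2"]) == hash_lines(["x = 1  ", "y = 2"])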
'''simple docstring''' from typing import Any, Callable, Dict, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker snake_case_ : Union[str, Any] = "CompVis/stable-diffusion-v1-1" snake_case_ : Dict = "CompVis/stable-diffusion-v1-2" snake_case_ : Any = "CompVis/stable-diffusion-v1-3" snake_case_ : str = "CompVis/stable-diffusion-v1-4" class __a (lowerCamelCase ): def __init__( self : Any , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , __magic_name__ : bool = True , ) -> str: """simple docstring""" super().__init__() UpperCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = StableDiffusionPipeline.from_pretrained(__magic_name__ ) UpperCAmelCase_ : Tuple = StableDiffusionPipeline( vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , requires_safety_checker=__magic_name__ , ) self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea ) @property def UpperCAmelCase__ ( self : Tuple ) -> Dict[str, Any]: """simple docstring""" return {k: getattr(self , __magic_name__ ) for k in self.config.keys() if not k.startswith('''_''' )} def UpperCAmelCase__ ( self : Dict , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> int: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase_ : List[str] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" self.enable_attention_slicing(__magic_name__ ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ ,
callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Any , ) -> Any: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Dict , ) -> List[str]: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : int , __magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> str: """simple docstring""" return self.pipea( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) @torch.no_grad() def UpperCAmelCase__ ( self : Optional[Any] , 
__magic_name__ : Union[str, List[str]] , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Optional[int] , ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu''' self.to(__magic_name__ ) # Checks if the height and width are divisible by 8 or not if height % 8 != 0 or width % 8 != 0: raise ValueError(F"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" ) # Get first result from Stable Diffusion Checkpoint v1.1 UpperCAmelCase_ : Optional[int] = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.2 UpperCAmelCase_ : int = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.3 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get first result from Stable Diffusion Checkpoint v1.4 UpperCAmelCase_ : str = self.textaimg_sda_a( prompt=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , **__magic_name__ , ) # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
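# Hypothetical usage of the comparison pipeline above (kept as comments because
# it downloads four Stable Diffusion checkpoints). The custom_pipeline name is
# an assumption about how the class would be registered as a community pipeline.
# from diffusers import DiffusionPipeline
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
# )
# result = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
# images = result.images  # one image per checkpoint, v1.1 through v1.4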
'''simple docstring''' from __future__ import annotations from statistics import mean def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int], SCREAMING_SNAKE_CASE__ : list[int], SCREAMING_SNAKE_CASE__ : int ) -> list[int]: UpperCAmelCase_ : List[str] = [0] * no_of_processes UpperCAmelCase_ : Optional[int] = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : int = burst_time[i] UpperCAmelCase_ : list[int] = [] UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Any = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Any = -1 for i in range(SCREAMING_SNAKE_CASE__ ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(SCREAMING_SNAKE_CASE__ ) if len(SCREAMING_SNAKE_CASE__ ) > 0: UpperCAmelCase_ : int = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: UpperCAmelCase_ : str = i total_time += burst_time[target_process] completed += 1 UpperCAmelCase_ : Union[str, Any] = 0 UpperCAmelCase_ : Optional[int] = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : list[int], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : list[int] ) -> list[int]: UpperCAmelCase_ : Union[str, Any] = [0] * no_of_processes for i in range(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Dict = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("[TEST CASE 01]") snake_case_ : Tuple = 4 snake_case_ : int = [2, 5, 3, 7] snake_case_ : Union[str, Any] = [0, 0, 0, 0] snake_case_ : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes) snake_case_ : Tuple = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time") for i, process_id in enumerate(list(range(1, 5))): print( f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t''' f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}''' ) print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''') print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
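# Hand check of the test case above: with all arrivals at t=0 the scheduler
# reduces to shortest-job-first, so the bursts run in order 2, 3, 5, 7.
t, waits = 0, []
for burst in sorted([2, 5, 3, 7]):
    waits.append(t)
    t += burst
assert waits == [0, 2, 5, 10]  # per-process waits in SJF order
assert sum(waits) / 4 == 4.25  # matches the printed average waiting time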
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler snake_case_ : Optional[int] = 16 snake_case_ : Tuple = 32 def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Accelerator, SCREAMING_SNAKE_CASE__ : int = 16, SCREAMING_SNAKE_CASE__ : str = "bert-base-cased" ) -> Dict: UpperCAmelCase_ : Dict = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = load_dataset('''glue''', '''mrpc''' ) def tokenize_function(SCREAMING_SNAKE_CASE__ : Optional[int] ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase_ : Union[str, Any] = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase_ : Tuple = datasets.map( SCREAMING_SNAKE_CASE__, batched=SCREAMING_SNAKE_CASE__, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], load_from_cache_file=SCREAMING_SNAKE_CASE__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library UpperCAmelCase_ : Optional[Any] = tokenized_datasets.rename_column('''label''', '''labels''' ) def collate_fn(SCREAMING_SNAKE_CASE__ : str ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''max_length''', max_length=128, return_tensors='''pt''' ) return tokenizer.pad(SCREAMING_SNAKE_CASE__, padding='''longest''', return_tensors='''pt''' ) # Instantiate dataloaders. UpperCAmelCase_ : str = DataLoader( tokenized_datasets['''train'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = DataLoader( tokenized_datasets['''validation'''], shuffle=SCREAMING_SNAKE_CASE__, collate_fn=SCREAMING_SNAKE_CASE__, batch_size=SCREAMING_SNAKE_CASE__ ) return train_dataloader, eval_dataloader def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : Any ) -> Any: model.eval() UpperCAmelCase_ : List[str] = 0 for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): UpperCAmelCase_ : Dict = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : str = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times UpperCAmelCase_ , UpperCAmelCase_ : List[str] = accelerator.gather( (predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(SCREAMING_SNAKE_CASE__ ) - 1: UpperCAmelCase_ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen] UpperCAmelCase_ : int = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=SCREAMING_SNAKE_CASE__, references=SCREAMING_SNAKE_CASE__, ) UpperCAmelCase_ : List[str] = metric.compute() return eval_metric["accuracy"] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : int ) -> Tuple: # Initialize accelerator UpperCAmelCase_ : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs UpperCAmelCase_ : int = config['''lr'''] UpperCAmelCase_ : Optional[int] = int(config['''num_epochs'''] ) UpperCAmelCase_ : Optional[int] = int(config['''seed'''] ) UpperCAmelCase_ : List[str] = int(config['''batch_size'''] ) UpperCAmelCase_ : Optional[int] = args.model_name_or_path set_seed(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) UpperCAmelCase_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE__, return_dict=SCREAMING_SNAKE_CASE__ ) # Instantiate optimizer UpperCAmelCase_ : str = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) UpperCAmelCase_ : List[str] = optimizer_cls(params=model.parameters(), lr=SCREAMING_SNAKE_CASE__ ) if accelerator.state.deepspeed_plugin is not None: UpperCAmelCase_ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: UpperCAmelCase_ : Tuple = 1 UpperCAmelCase_ : int = (len(SCREAMING_SNAKE_CASE__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): UpperCAmelCase_ : Tuple = get_linear_schedule_with_warmup( optimizer=SCREAMING_SNAKE_CASE__, num_warmup_steps=0, num_training_steps=SCREAMING_SNAKE_CASE__, ) else: UpperCAmelCase_ : Any = DummyScheduler(SCREAMING_SNAKE_CASE__, total_num_steps=SCREAMING_SNAKE_CASE__, warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = accelerator.prepare( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) # We need to keep track of how many total steps we have iterated over UpperCAmelCase_ : Union[str, Any] = 0 # We also need to keep track of the stating epoch so files are named properly UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : int = evaluate.load('''glue''', '''mrpc''' ) UpperCAmelCase_ : Optional[Any] = num_epochs if args.partial_train_epoch is not None: UpperCAmelCase_ : List[Any] = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) UpperCAmelCase_ : Tuple = args.resume_from_checkpoint.split('''epoch_''' )[1] UpperCAmelCase_ : int = '''''' for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break UpperCAmelCase_ : Union[str, Any] = int(SCREAMING_SNAKE_CASE__ ) + 1 UpperCAmelCase_ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint performance:''', SCREAMING_SNAKE_CASE__ ) accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''', lr_scheduler.get_lr()[0] ) accelerator.print('''resumed optimizers\'s lr:''', optimizer.param_groups[0]['''lr'''] ) with open(os.path.join(args.output_dir, F"""state_{starting_epoch-1}.json""" ), '''r''' ) as f: UpperCAmelCase_ : Optional[int] = json.load(SCREAMING_SNAKE_CASE__ ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model UpperCAmelCase_ : int = {} for epoch in range(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): model.train() for step, batch in enumerate(SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : Optional[int] = model(**SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Any = outputs.loss UpperCAmelCase_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(SCREAMING_SNAKE_CASE__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 UpperCAmelCase_ : Tuple = F"""epoch_{epoch}""" UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir, SCREAMING_SNAKE_CASE__ ) accelerator.save_state(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : int = evaluation_loop(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[Any] = accuracy UpperCAmelCase_ : Any = lr_scheduler.get_lr()[0] UpperCAmelCase_ : List[str] = optimizer.param_groups[0]['''lr'''] UpperCAmelCase_ : Tuple = epoch UpperCAmelCase_ : Dict = overall_step accelerator.print(F"""epoch {epoch}:""", SCREAMING_SNAKE_CASE__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir, F"""state_{epoch}.json""" ), '''w''' ) as f: json.dump(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) def lowerCamelCase_ ( ) -> List[str]: UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser(description='''Simple example of training script tracking 
peak GPU memory usage.''' ) parser.add_argument( '''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, default='''bert-base-cased''', help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, ) parser.add_argument( '''--output_dir''', type=SCREAMING_SNAKE_CASE__, default='''.''', help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''', ) parser.add_argument( '''--resume_from_checkpoint''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If the training should continue from a checkpoint folder.''', ) parser.add_argument( '''--partial_train_epoch''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''If passed, the training will stop after this number of epochs.''', ) parser.add_argument( '''--num_epochs''', type=SCREAMING_SNAKE_CASE__, default=2, help='''Number of train epochs.''', ) UpperCAmelCase_ : Optional[int] = parser.parse_args() UpperCAmelCase_ : List[Any] = {'''lr''': 2E-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16} training_function(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
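# Standalone sketch of the resume logic above: recover the epoch index from a
# checkpoint folder named like "epoch_7". The helper name is illustrative.
def epoch_from_name(name: str) -> int:
    digits = ""
    for ch in name.split("epoch_")[1]:
        if ch.isdigit():
            digits += ch
        else:
            break
    return int(digits)

assert epoch_from_name("output/epoch_7") == 7
assert epoch_from_name("output/epoch_12_backup") == 12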
'''simple docstring''' import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) snake_case_ : Any = logging.getLogger(__name__) snake_case_ : Dict = {"facebook/bart-base": BartForConditionalGeneration} snake_case_ : Optional[Any] = {"facebook/bart-base": BartTokenizer} def lowerCamelCase_ ( ) -> Optional[int]: UpperCAmelCase_ : List[str] = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' ) parser.add_argument( '''--validation_file''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''A csv or a json file containing the validation data.''' ) parser.add_argument( '''--max_length''', type=SCREAMING_SNAKE_CASE__, default=5, help='''The maximum total input sequence length after tokenization.''', ) parser.add_argument( '''--num_beams''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help=( '''Number of beams to use for evaluation. This argument will be ''' '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.''' ), ) parser.add_argument( '''--model_name_or_path''', type=SCREAMING_SNAKE_CASE__, help='''Path to pretrained model or model identifier from huggingface.co/models.''', required=SCREAMING_SNAKE_CASE__, ) parser.add_argument( '''--config_name''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''Pretrained config name or path if not the same as model_name''', ) parser.add_argument( '''--device''', type=SCREAMING_SNAKE_CASE__, default='''cpu''', help='''Device where the model will be run''', ) parser.add_argument('''--output_file_path''', type=SCREAMING_SNAKE_CASE__, default=SCREAMING_SNAKE_CASE__, help='''Where to store the final ONNX file.''' ) UpperCAmelCase_ : Optional[int] = parser.parse_args() return args def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[Any], SCREAMING_SNAKE_CASE__ : Union[str, Any]="cpu" ) -> Tuple: UpperCAmelCase_ : Tuple = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : List[str] = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__ ) if model_name in ["facebook/bart-base"]: UpperCAmelCase_ : Optional[Any] = 0 UpperCAmelCase_ : Any = None UpperCAmelCase_ : List[Any] = 0 return huggingface_model, tokenizer def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Optional[Any], SCREAMING_SNAKE_CASE__ : Tuple, SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Optional[int], SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any: model.eval() UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : Union[str, Any] = torch.jit.script(BARTBeamSearchGenerator(SCREAMING_SNAKE_CASE__ ) ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = '''My friends are cool but they eat too many carbs.''' UpperCAmelCase_ : List[str] = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='''pt''' ).to(model.device ) UpperCAmelCase_ : Optional[int] = model.generate( inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], num_beams=SCREAMING_SNAKE_CASE__, max_length=SCREAMING_SNAKE_CASE__, 
early_stopping=SCREAMING_SNAKE_CASE__, decoder_start_token_id=model.config.decoder_start_token_id, ) torch.onnx.export( SCREAMING_SNAKE_CASE__, ( inputs['''input_ids'''], inputs['''attention_mask'''], num_beams, max_length, model.config.decoder_start_token_id, ), SCREAMING_SNAKE_CASE__, opset_version=14, input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''], output_names=['''output_ids'''], dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''seq'''}, '''output_ids''': {0: '''batch''', 1: '''seq_out'''}, }, example_outputs=SCREAMING_SNAKE_CASE__, ) logger.info('''Model exported to {}'''.format(SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase_ : Union[str, Any] = remove_dup_initializers(os.path.abspath(SCREAMING_SNAKE_CASE__ ) ) logger.info('''Deduplicated and optimized model written to {}'''.format(SCREAMING_SNAKE_CASE__ ) ) UpperCAmelCase_ : Dict = onnxruntime.InferenceSession(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Optional[int] = ort_sess.run( SCREAMING_SNAKE_CASE__, { '''input_ids''': inputs['''input_ids'''].cpu().numpy(), '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(), '''num_beams''': np.array(SCREAMING_SNAKE_CASE__ ), '''max_length''': np.array(SCREAMING_SNAKE_CASE__ ), '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ), }, ) np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1E-3, atol=1E-3 ) logger.info('''Model outputs from torch and ONNX Runtime are similar.''' ) logger.info('''Success.''' ) def lowerCamelCase_ ( ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = parse_args() UpperCAmelCase_ : List[str] = 5 UpperCAmelCase_ : Optional[Any] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO, ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() UpperCAmelCase_ : Dict = torch.device(args.device ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = load_model_tokenizer(args.model_name_or_path, SCREAMING_SNAKE_CASE__ ) if model.config.decoder_start_token_id is None: raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' ) model.to(SCREAMING_SNAKE_CASE__ ) if args.max_length: UpperCAmelCase_ : Tuple = args.max_length if args.num_beams: UpperCAmelCase_ : Optional[int] = args.num_beams if args.output_file_path: UpperCAmelCase_ : Optional[int] = args.output_file_path else: UpperCAmelCase_ : Dict = '''BART.onnx''' logger.info('''Exporting model to ONNX''' ) export_and_validate_model(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": main()
644
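A minimal sketch of reloading the exported graph with ONNX Runtime. The file name matches the script's default output, and the feed keys mirror the `input_names` used at export time; the token ids are placeholders, so treat this as an illustration of the feed dictionary rather than a meaningful generation:

import numpy as np
import onnxruntime

ort_sess = onnxruntime.InferenceSession("BART.onnx")
ort_out = ort_sess.run(
    None,  # fetch all outputs; the first is "output_ids"
    {
        "input_ids": np.array([[0, 713, 16, 10, 1296, 2]], dtype=np.int64),  # placeholder ids
        "attention_mask": np.ones((1, 6), dtype=np.int64),
        "num_beams": np.array(4),
        "max_length": np.array(5),
        "decoder_start_token_id": np.array(2),
    },
)
print(ort_out[0].shape)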
'''simple docstring'''

def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums: list[int]) -> list[list[int]]:
    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
644
1
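A quick sanity check for the two routines above: both should produce the same multiset of permutations as the standard library.

from itertools import permutations as std_permutations

assert sorted(permute([1, 2, 3])) == sorted(map(list, std_permutations([1, 2, 3])))
assert sorted(permute2([1, 2, 3])) == sorted(map(list, std_permutations([1, 2, 3])))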
'''simple docstring'''
import math


def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')  # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')  # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)


if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
644
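A worked example of the function above: at 60 degrees, cos²(60°) = 0.25, so a 100-unit beam is attenuated to 25 units (up to floating-point noise).

print(round(malus_law(100.0, 60.0), 6))  # 25.0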
'''simple docstring''' class __a : def __init__( self : List[Any] , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : Optional[Any] = size UpperCAmelCase_ : Tuple = [0] * size UpperCAmelCase_ : Optional[Any] = [0] * size @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return index | (index + 1) @staticmethod def UpperCAmelCase__ ( __magic_name__ : int ) -> int: """simple docstring""" return (index & (index + 1)) - 1 def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : int = value while index < self.size: UpperCAmelCase_ : str = self.get_prev(__magic_name__ ) + 1 if current_left_border == index: UpperCAmelCase_ : List[str] = value else: UpperCAmelCase_ : Optional[int] = max(__magic_name__ , __magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = self.get_next(__magic_name__ ) def UpperCAmelCase__ ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int: """simple docstring""" right -= 1 # Because of right is exclusive UpperCAmelCase_ : List[str] = 0 while left <= right: UpperCAmelCase_ : Optional[Any] = self.get_prev(__magic_name__ ) if left <= current_left: UpperCAmelCase_ : Dict = max(__magic_name__ , self.tree[right] ) UpperCAmelCase_ : Optional[Any] = current_left else: UpperCAmelCase_ : str = max(__magic_name__ , self.arr[right] ) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
644
1
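The structure above answers range-maximum queries with point updates in O(log n). A self-contained sketch of the same idea as a plain iterative segment tree — an alternative to the Fenwick-style arr/tree pair, assuming non-negative values so 0 works as the identity:

class RangeMax:
    def __init__(self, size: int) -> None:
        self.size = size
        self.tree = [0] * (2 * size)  # leaves live at [size, 2*size)

    def update(self, index: int, value: int) -> None:
        index += self.size
        self.tree[index] = value
        while index > 1:  # recompute ancestors bottom-up
            index //= 2
            self.tree[index] = max(self.tree[2 * index], self.tree[2 * index + 1])

    def query(self, left: int, right: int) -> int:  # right is exclusive, as above
        result = 0
        left += self.size
        right += self.size
        while left < right:
            if left % 2:
                result = max(result, self.tree[left])
                left += 1
            if right % 2:
                right -= 1
                result = max(result, self.tree[right])
            left //= 2
            right //= 2
        return result


rm = RangeMax(8)
rm.update(3, 7)
rm.update(5, 4)
print(rm.query(0, 8), rm.query(4, 8))  # 7 4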
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging snake_case_ : List[Any] = logging.get_logger(__name__) class __a (lowerCamelCase ): __a : Any = ["pixel_values"] def __init__( self : Optional[Any] , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : bool = True , __magic_name__ : Dict[str, int] = None , __magic_name__ : bool = True , __magic_name__ : Union[int, float] = 1 / 2_55 , __magic_name__ : bool = True , __magic_name__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __magic_name__ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__magic_name__ : List[str] , ) -> None: """simple docstring""" super().__init__(**__magic_name__ ) UpperCAmelCase_ : Any = size if size is not None else {'''shortest_edge''': 2_24} UpperCAmelCase_ : int = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) UpperCAmelCase_ : Any = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} UpperCAmelCase_ : Tuple = get_size_dict(__magic_name__ , param_name='''crop_size''' ) UpperCAmelCase_ : Tuple = do_resize UpperCAmelCase_ : List[str] = size UpperCAmelCase_ : int = resample UpperCAmelCase_ : Optional[int] = do_center_crop UpperCAmelCase_ : Dict = crop_size UpperCAmelCase_ : str = do_rescale UpperCAmelCase_ : List[str] = rescale_factor UpperCAmelCase_ : Tuple = do_normalize UpperCAmelCase_ : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCAmelCase_ : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCAmelCase__ ( self : Any , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : PILImageResampling = PILImageResampling.BICUBIC , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : int , ) -> np.ndarray: """simple docstring""" UpperCAmelCase_ : Optional[int] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: UpperCAmelCase_ : Dict = int((2_56 / 2_24) * size['''shortest_edge'''] ) UpperCAmelCase_ : Optional[Any] = get_resize_output_image_size(__magic_name__ , size=__magic_name__ , default_to_square=__magic_name__ ) UpperCAmelCase_ : Any = {'''height''': output_size[0], '''width''': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. 
Got {size_dict.keys()}""" ) return resize( __magic_name__ , size=(size_dict['''height'''], size_dict['''width''']) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : np.ndarray , __magic_name__ : Dict[str, int] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Dict , ) -> np.ndarray: """simple docstring""" UpperCAmelCase_ : Optional[Any] = get_size_dict(__magic_name__ ) if "height" not in size or "width" not in size: raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" ) return center_crop(__magic_name__ , size=(size['''height'''], size['''width''']) , data_format=__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : Union[int, float] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : int , ) -> np.ndarray: """simple docstring""" return rescale(__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : np.ndarray , __magic_name__ : Union[float, List[float]] , __magic_name__ : Union[float, List[float]] , __magic_name__ : Optional[Union[str, ChannelDimension]] = None , **__magic_name__ : Tuple , ) -> np.ndarray: """simple docstring""" return normalize(__magic_name__ , mean=__magic_name__ , std=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : ImageInput , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Dict[str, int]] = None , __magic_name__ : PILImageResampling = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Dict[str, int]] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[float] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[float, Iterable[float]]] = None , __magic_name__ : Optional[Union[float, Iterable[float]]] = None , __magic_name__ : Optional[TensorType] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Any , ) -> BatchFeature: """simple docstring""" UpperCAmelCase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : List[Any] = resample if resample is not None else self.resample UpperCAmelCase_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : str = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : Any = image_std if image_std is not None else self.image_std UpperCAmelCase_ : Optional[int] = size if size is not None else self.size UpperCAmelCase_ : List[Any] = get_size_dict(__magic_name__ , default_to_square=__magic_name__ ) UpperCAmelCase_ : Optional[int] = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : int = get_size_dict(__magic_name__ , param_name='''crop_size''' ) UpperCAmelCase_ : int = make_list_of_images(__magic_name__ ) if not valid_images(__magic_name__ ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. UpperCAmelCase_ : str = [to_numpy_array(__magic_name__ ) for image in images] if do_resize: UpperCAmelCase_ : Union[str, Any] = [self.resize(__magic_name__ , __magic_name__ , __magic_name__ ) for image in images] if do_center_crop: UpperCAmelCase_ : Optional[int] = [self.center_crop(__magic_name__ , __magic_name__ ) for image in images] if do_rescale: UpperCAmelCase_ : Optional[Any] = [self.rescale(__magic_name__ , __magic_name__ ) for image in images] if do_normalize: UpperCAmelCase_ : Union[str, Any] = [self.normalize(__magic_name__ , __magic_name__ , __magic_name__ ) for image in images] UpperCAmelCase_ : List[str] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images] UpperCAmelCase_ : Dict = {'''pixel_values''': images} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
644
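The resize step above inflates the requested shortest edge by 256/224 before an aspect-preserving resize (224 becomes 256, the classic ImageNet crop ratio). The arithmetic in isolation, with rounding that may differ slightly from the library helper it delegates to:

def shortest_edge_output_size(height: int, width: int, shortest_edge: int = 224) -> tuple[int, int]:
    target = int((256 / 224) * shortest_edge)  # 224 -> 256
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)

print(shortest_edge_output_size(480, 640))  # (256, 341)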
'''simple docstring''' import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class __a : def __init__( self : List[str] , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : Union[str, Any]=7 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=True , __magic_name__ : List[str]=False , __magic_name__ : Optional[int]=True , __magic_name__ : Dict=99 , __magic_name__ : Tuple=32 , __magic_name__ : int=5 , __magic_name__ : Dict=4 , __magic_name__ : Tuple=37 , __magic_name__ : Optional[int]="gelu" , __magic_name__ : List[str]=0.1 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : str=5_12 , __magic_name__ : Union[str, Any]=16 , __magic_name__ : int=2 , __magic_name__ : List[Any]=0.0_2 , __magic_name__ : Tuple=3 , __magic_name__ : Union[str, Any]=4 , __magic_name__ : Optional[int]=None , ) -> str: """simple docstring""" UpperCAmelCase_ : Any = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : List[Any] = seq_length UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Any = use_input_mask UpperCAmelCase_ : List[str] = use_token_type_ids UpperCAmelCase_ : Union[str, Any] = use_labels UpperCAmelCase_ : Dict = vocab_size UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : Union[str, Any] = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : str = type_vocab_size UpperCAmelCase_ : Optional[Any] = type_sequence_label_size UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[int] = num_choices UpperCAmelCase_ : Tuple = scope def UpperCAmelCase__ ( self : Union[str, Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : str = None if self.use_token_type_ids: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Any ) -> List[Any]: 
"""simple docstring""" return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : str , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Optional[int] , ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptForCausalLM(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : List[Any] = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : int , __magic_name__ : str , *__magic_name__ : Any ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() # create attention mask UpperCAmelCase_ : Optional[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) UpperCAmelCase_ : Any = self.seq_length // 2 UpperCAmelCase_ : Tuple = 0 # first forward pass UpperCAmelCase_ , UpperCAmelCase_ : Dict = model(__magic_name__ , attention_mask=__magic_name__ ).to_tuple() # create hypothetical next token and extent to next_input_ids UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids UpperCAmelCase_ : List[str] = ids_tensor((1,) , __magic_name__ ).item() + 1 UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) UpperCAmelCase_ : str = random_other_next_tokens # append to next input_ids and attn_mask UpperCAmelCase_ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : int = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=__magic_name__ )] , dim=1 , ) # get two different outputs UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : int = model(__magic_name__ , past_key_values=__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] # select random slice UpperCAmelCase_ : Optional[int] = ids_tensor((1,) , 
output_from_past.shape[-1] ).item() UpperCAmelCase_ : Union[str, Any] = output_from_no_past[:, -1, random_slice_idx].detach() UpperCAmelCase_ : Dict = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Dict , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , *__magic_name__ : str ) -> int: """simple docstring""" UpperCAmelCase_ : Dict = BioGptModel(config=__magic_name__ ).to(__magic_name__ ).eval() UpperCAmelCase_ : Optional[int] = torch.ones(input_ids.shape , dtype=torch.long , device=__magic_name__ ) # first forward pass UpperCAmelCase_ : Union[str, Any] = model(__magic_name__ , attention_mask=__magic_name__ , use_cache=__magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : int = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : Any = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ : Optional[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : List[str] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ )['''last_hidden_state'''] UpperCAmelCase_ : Optional[Any] = model(__magic_name__ , attention_mask=__magic_name__ , past_key_values=__magic_name__ )[ '''last_hidden_state''' ] # select random slice UpperCAmelCase_ : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Optional[int] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , *__magic_name__ : Any , __magic_name__ : List[Any]=False ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Any = BioGptForCausalLM(__magic_name__ ) model.to(__magic_name__ ) if gradient_checkpointing: model.gradient_checkpointing_enable() UpperCAmelCase_ : List[str] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : List[str] ) -> str: """simple docstring""" UpperCAmelCase_ : int = BioGptModel(__magic_name__ ) UpperCAmelCase_ : Dict = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 ) self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 ) def UpperCAmelCase__ ( self : int , __magic_name__ : Tuple , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , *__magic_name__ : Any ) -> Union[str, Any]: 
"""simple docstring""" UpperCAmelCase_ : str = self.num_labels UpperCAmelCase_ : Any = BioGptForTokenClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Any = model(__magic_name__ , attention_mask=__magic_name__ , token_type_ids=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : int = config_and_inputs UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : str = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __a : List[Any] = (BioGptForCausalLM,) if is_torch_available() else () __a : Union[str, Any] = ( { "feature-extraction": BioGptModel, "text-classification": BioGptForSequenceClassification, "text-generation": BioGptForCausalLM, "token-classification": BioGptForTokenClassification, "zero-shot": BioGptForSequenceClassification, } if is_torch_available() else {} ) __a : List[str] = False def UpperCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = BioGptModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase_ : str = type self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*__magic_name__ , gradient_checkpointing=__magic_name__ ) def UpperCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*__magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*__magic_name__ ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*__magic_name__ ) @slow def UpperCAmelCase__ ( 
self : List[Any] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) UpperCAmelCase_ : List[str] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : Tuple = '''left''' # Define PAD Token = EOS Token = 50256 UpperCAmelCase_ : List[Any] = tokenizer.eos_token UpperCAmelCase_ : List[Any] = model.config.eos_token_id # use different length sentences to test batching UpperCAmelCase_ : Tuple = [ '''Hello, my dog is a little''', '''Today, I''', ] UpperCAmelCase_ : Optional[Any] = tokenizer(__magic_name__ , return_tensors='''pt''' , padding=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = inputs['''input_ids'''].to(__magic_name__ ) UpperCAmelCase_ : Any = model.generate( input_ids=__magic_name__ , attention_mask=inputs['''attention_mask'''].to(__magic_name__ ) , ) UpperCAmelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ ) UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item() UpperCAmelCase_ : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(__magic_name__ ) UpperCAmelCase_ : Tuple = model.generate(input_ids=__magic_name__ , max_length=model.config.max_length - num_paddings ) UpperCAmelCase_ : int = tokenizer.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Dict = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = [ '''Hello, my dog is a little bit bigger than a little bit.''', '''Today, I have a good idea of how to use the information''', ] self.assertListEqual(__magic_name__ , __magic_name__ ) self.assertListEqual(__magic_name__ , [non_padded_sentence, padded_sentence] ) @slow def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def UpperCAmelCase__ ( self : Tuple ) -> str: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = 3 UpperCAmelCase_ : Tuple = input_dict['''input_ids'''] UpperCAmelCase_ : Dict = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase_ : Dict = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : int = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = 3 UpperCAmelCase_ : Optional[int] = '''multi_label_classification''' UpperCAmelCase_ : int = input_dict['''input_ids'''] UpperCAmelCase_ : str = input_ids.ne(1 ).to(__magic_name__ ) UpperCAmelCase_ : Any = ids_tensor( [self.model_tester.batch_size, config.num_labels] , 
self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase_ : Union[str, Any] = BioGptForSequenceClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : str = model(__magic_name__ , attention_mask=__magic_name__ , labels=__magic_name__ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class __a (unittest.TestCase ): @slow def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : List[str] = torch.tensor([[2, 48_05, 9, 6_56, 21]] ) UpperCAmelCase_ : str = model(__magic_name__ )[0] UpperCAmelCase_ : Optional[int] = 4_23_84 UpperCAmelCase_ : Tuple = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , __magic_name__ ) UpperCAmelCase_ : List[Any] = torch.tensor( [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __magic_name__ , atol=1E-4 ) ) @slow def UpperCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Any = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' ) UpperCAmelCase_ : str = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' ) model.to(__magic_name__ ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(__magic_name__ ) UpperCAmelCase_ : Optional[int] = model.generate( **__magic_name__ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__magic_name__ , ) UpperCAmelCase_ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = ( '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the''' ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and''' ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),''' ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and''' ''' more than 800,000 deaths.''' ) self.assertEqual(__magic_name__ , __magic_name__ )
644
1
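The slow test above boils down to standard `generate` usage; a minimal sketch (it downloads the microsoft/biogpt checkpoint, so it needs network access, and the length settings here are scaled down from the test's):

import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, num_beams=5, min_length=20, max_length=64, early_stopping=True)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))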
'''simple docstring'''
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f'''{solution() = }''')
644
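The same 13-digit window scan written with math.prod, using N and solution from the snippet above; both computations should agree:

from math import prod

def solution_prod(n: str = N, window: int = 13) -> int:
    digits = [int(c) for c in n]
    return max(prod(digits[i : i + window]) for i in range(len(digits) - window + 1))

print(solution_prod() == solution())  # True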
'''simple docstring''' import json import os import unittest from transformers.models.blenderbot_small.tokenization_blenderbot_small import ( VOCAB_FILES_NAMES, BlenderbotSmallTokenizer, ) from ...test_tokenization_common import TokenizerTesterMixin class __a (lowerCamelCase , unittest.TestCase ): __a : List[str] = BlenderbotSmallTokenizer __a : List[Any] = False def UpperCAmelCase__ ( self : str ) -> str: """simple docstring""" super().setUp() UpperCAmelCase_ : Tuple = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__'''] UpperCAmelCase_ : Optional[Any] = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) ) UpperCAmelCase_ : int = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', ''''''] UpperCAmelCase_ : Optional[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''} UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(__magic_name__ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(__magic_name__ ) ) def UpperCAmelCase__ ( self : List[Any] , **__magic_name__ : Dict ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : List[str] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : str = '''adapt act apte''' UpperCAmelCase_ : Tuple = '''adapt act apte''' return input_text, output_text def UpperCAmelCase__ ( self : str ) -> Any: """simple docstring""" UpperCAmelCase_ : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ : List[Any] = '''adapt act apte''' UpperCAmelCase_ : Dict = ['''adapt''', '''act''', '''ap@@''', '''te'''] UpperCAmelCase_ : Dict = tokenizer.tokenize(__magic_name__ ) self.assertListEqual(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : Tuple = [tokenizer.bos_token] + tokens + [tokenizer.eos_token] UpperCAmelCase_ : Dict = [0, 1, 2, 3, 4, 5] self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ ) def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : List[Any] = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) assert tok('''sam''' ).input_ids == [13_84] UpperCAmelCase_ : Optional[int] = '''I am a small frog.''' UpperCAmelCase_ : List[str] = tok([src_text] , padding=__magic_name__ , truncation=__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Dict = tok.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )[0] assert src_text != decoded # I wish it did! assert decoded == "i am a small frog ." def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' ) UpperCAmelCase_ : List[Any] = '''I am a small frog .''' UpperCAmelCase_ : Any = '''.''' UpperCAmelCase_ : List[Any] = tok(__magic_name__ )['''input_ids'''] UpperCAmelCase_ : Optional[int] = tok(__magic_name__ )['''input_ids'''] assert encoded[-1] == encoded_dot[0]
644
1
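The fixture in setUp can be reproduced stand-alone: write the toy vocab and merges to a temporary directory and tokenize the same probe string (the vocab.json / merges.txt file names are an assumption matching the usual convention):

import json, os, tempfile
from transformers import BlenderbotSmallTokenizer

with tempfile.TemporaryDirectory() as tmp:
    vocab = {t: i for i, t in enumerate(["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"])}
    merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
    vocab_file = os.path.join(tmp, "vocab.json")
    merges_file = os.path.join(tmp, "merges.txt")
    with open(vocab_file, "w") as f:
        json.dump(vocab, f)
    with open(merges_file, "w") as f:
        f.write("\n".join(merges))
    tok = BlenderbotSmallTokenizer(
        vocab_file, merges_file, unk_token="__unk__", bos_token="__start__", eos_token="__end__"
    )
    print(tok.tokenize("adapt act apte"))  # ['adapt', 'act', 'ap@@', 'te']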
'''simple docstring'''
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )


if __name__ == "__main__":
    print(f'''{solution() = }''')
644
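Project Euler's statement gives 1406357289 as an example with this sub-string divisibility property; a compact stand-alone check of the same seven conditions:

def has_property(num: tuple) -> bool:
    return all(
        (num[i + 1] * 100 + num[i + 2] * 10 + num[i + 3]) % p == 0
        for i, p in enumerate([2, 3, 5, 7, 11, 13, 17])
    )

print(has_property(tuple(int(d) for d in "1406357289")))  # True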
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase_ : Dict = get_activation('''swish''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = get_activation('''silu''' ) self.assertIsInstance(__magic_name__ , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Optional[int] = get_activation('''mish''' ) self.assertIsInstance(__magic_name__ , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : List[Any] = get_activation('''gelu''' ) self.assertIsInstance(__magic_name__ , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
644
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case_ : Dict = { "configuration_clipseg": [ "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPSegConfig", "CLIPSegTextConfig", "CLIPSegVisionConfig", ], "processing_clipseg": ["CLIPSegProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case_ : Optional[int] = [ "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPSegModel", "CLIPSegPreTrainedModel", "CLIPSegTextModel", "CLIPSegVisionModel", "CLIPSegForImageSegmentation", ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys snake_case_ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
644
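The init above defers the heavy torch imports until an attribute is actually touched. The same effect can be sketched with a module-level __getattr__ (PEP 562) in a package's __init__.py; the mapping below reuses one entry of the real import structure, but the dispatch logic is an illustration, not the _LazyModule implementation:

import importlib

_import_structure = {"configuration_clipseg": ["CLIPSegConfig"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")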
'''simple docstring''' from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging snake_case_ : Union[str, Any] = logging.get_logger(__name__) class __a (lowerCamelCase ): __a : Tuple = ["pixel_values"] def __init__( self : List[Any] , __magic_name__ : bool = True , __magic_name__ : int = 32 , __magic_name__ : Union[str, Any]=PILImageResampling.BILINEAR , __magic_name__ : bool = True , **__magic_name__ : List[str] , ) -> None: """simple docstring""" UpperCAmelCase_ : int = do_resize UpperCAmelCase_ : Tuple = do_rescale UpperCAmelCase_ : List[Any] = size_divisor UpperCAmelCase_ : Any = resample super().__init__(**__magic_name__ ) def UpperCAmelCase__ ( self : Optional[Any] , __magic_name__ : np.ndarray , __magic_name__ : int , __magic_name__ : str , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Tuple ) -> np.ndarray: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_image_size(__magic_name__ ) # Rounds the height and width down to the closest multiple of size_divisor UpperCAmelCase_ : Dict = height // size_divisor * size_divisor UpperCAmelCase_ : Dict = width // size_divisor * size_divisor UpperCAmelCase_ : Any = resize(__magic_name__ , (new_h, new_w) , resample=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) return image def UpperCAmelCase__ ( self : int , __magic_name__ : np.ndarray , __magic_name__ : float , __magic_name__ : Optional[ChannelDimension] = None , **__magic_name__ : Optional[Any] ) -> np.ndarray: """simple docstring""" return rescale(image=__magic_name__ , scale=__magic_name__ , data_format=__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : str , __magic_name__ : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[int] = None , __magic_name__ : Any=None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[Union[TensorType, str]] = None , __magic_name__ : ChannelDimension = ChannelDimension.FIRST , **__magic_name__ : Tuple , ) -> BatchFeature: """simple docstring""" UpperCAmelCase_ : Dict = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : str = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : Any = size_divisor if size_divisor is not None else self.size_divisor UpperCAmelCase_ : Dict = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) UpperCAmelCase_ : Optional[int] = make_list_of_images(__magic_name__ ) if not valid_images(__magic_name__ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. 
UpperCAmelCase_ : List[str] = [to_numpy_array(__magic_name__ ) for img in images] if do_resize: UpperCAmelCase_ : str = [self.resize(__magic_name__ , size_divisor=__magic_name__ , resample=__magic_name__ ) for image in images] if do_rescale: UpperCAmelCase_ : Tuple = [self.rescale(__magic_name__ , scale=1 / 2_55 ) for image in images] UpperCAmelCase_ : Union[str, Any] = [to_channel_dimension_format(__magic_name__ , __magic_name__ ) for image in images] UpperCAmelCase_ : int = {'''pixel_values''': images} return BatchFeature(data=__magic_name__ , tensor_type=__magic_name__ )
644
1
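The resize above floors both sides to the nearest multiple of size_divisor, since some backbones require input dimensions divisible by a fixed stride. The arithmetic on its own:

def floor_to_multiple(height: int, width: int, size_divisor: int = 32) -> tuple[int, int]:
    return height // size_divisor * size_divisor, width // size_divisor * size_divisor

print(floor_to_multiple(481, 641))  # (480, 640)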
'''simple docstring''' import inspect import unittest from math import floor from transformers import CvtConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import CvtForImageClassification, CvtModel from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __a (lowerCamelCase ): def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__magic_name__ , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(__magic_name__ , '''num_heads''' ) ) class __a : def __init__( self : List[str] , __magic_name__ : Dict , __magic_name__ : Tuple=13 , __magic_name__ : Any=64 , __magic_name__ : Optional[Any]=3 , __magic_name__ : Tuple=[16, 48, 96] , __magic_name__ : Optional[Any]=[1, 3, 6] , __magic_name__ : str=[1, 2, 10] , __magic_name__ : str=[7, 3, 3] , __magic_name__ : Dict=[4, 2, 2] , __magic_name__ : Any=[2, 1, 1] , __magic_name__ : str=[2, 2, 2] , __magic_name__ : Union[str, Any]=[False, False, True] , __magic_name__ : Dict=[0.0, 0.0, 0.0] , __magic_name__ : Optional[int]=0.0_2 , __magic_name__ : Dict=1E-12 , __magic_name__ : Optional[Any]=True , __magic_name__ : List[str]=True , __magic_name__ : int=2 , ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : int = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : Dict = image_size UpperCAmelCase_ : Tuple = patch_sizes UpperCAmelCase_ : List[Any] = patch_stride UpperCAmelCase_ : Dict = patch_padding UpperCAmelCase_ : Union[str, Any] = is_training UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : Union[str, Any] = num_labels UpperCAmelCase_ : Any = num_channels UpperCAmelCase_ : Union[str, Any] = embed_dim UpperCAmelCase_ : str = num_heads UpperCAmelCase_ : int = stride_kv UpperCAmelCase_ : Any = depth UpperCAmelCase_ : Tuple = cls_token UpperCAmelCase_ : str = attention_drop_rate UpperCAmelCase_ : Any = initializer_range UpperCAmelCase_ : Optional[Any] = layer_norm_eps def UpperCAmelCase__ ( self : int ) -> Any: """simple docstring""" UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Any = None if self.use_labels: UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : int = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : str ) -> Optional[Any]: """simple docstring""" return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : str , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : Dict ) -> List[str]: """simple docstring""" UpperCAmelCase_ 
: int = CvtModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : Any = model(__magic_name__ ) UpperCAmelCase_ : Optional[int] = (self.image_size, self.image_size) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = image_size[0], image_size[1] for i in range(len(self.depth ) ): UpperCAmelCase_ : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) UpperCAmelCase_ : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def UpperCAmelCase__ ( self : int , __magic_name__ : Optional[int] , __magic_name__ : Any , __magic_name__ : int ) -> Tuple: """simple docstring""" UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[str] = CvtForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() UpperCAmelCase_ : str = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[str] = config_and_inputs UpperCAmelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ): __a : Union[str, Any] = (CvtModel, CvtForImageClassification) if is_torch_available() else () __a : Union[str, Any] = ( {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification} if is_torch_available() else {} ) __a : Dict = False __a : List[Any] = False __a : Optional[Any] = False __a : List[str] = False __a : str = False def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" UpperCAmelCase_ : int = CvtModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def UpperCAmelCase__ ( self : int ) -> Optional[int]: """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" return @unittest.skip(reason='''Cvt does not output attentions''' ) def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple: """simple docstring""" pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]: """simple docstring""" pass def UpperCAmelCase__ ( self : Any ) -> int: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[int] = model_class(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names 
order is deterministic UpperCAmelCase_ : str = [*signature.parameters.keys()] UpperCAmelCase_ : Optional[int] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __magic_name__ ) def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] ) -> str: """simple docstring""" def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : int ): UpperCAmelCase_ : List[Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : Any = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) UpperCAmelCase_ : Optional[Any] = outputs.hidden_states UpperCAmelCase_ : List[str] = len(self.model_tester.depth ) self.assertEqual(len(__magic_name__ ) , __magic_name__ ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Any = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def UpperCAmelCase__ ( self : Dict ) -> Dict: """simple docstring""" UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def UpperCAmelCase__ ( self : Tuple ) -> Tuple: """simple docstring""" pass @slow def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : List[Any] = CvtModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowerCamelCase_ ( ) -> str: UpperCAmelCase_ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __a (unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : str ) -> List[Any]: """simple docstring""" return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def UpperCAmelCase__ ( self : List[Any] ) -> str: """simple docstring""" UpperCAmelCase_ : List[Any] = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__magic_name__ ) UpperCAmelCase_ : List[Any] = self.default_image_processor UpperCAmelCase_ : Any = prepare_img() UpperCAmelCase_ : Any = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : str = model(**__magic_name__ ) # verify the logits UpperCAmelCase_ : Dict = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
644
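The per-stage size arithmetic in the Cvt model test above is the standard convolution output-size formula. A standalone sketch with illustrative kernel/stride/padding values (assumed for the example, not read from any config):

# Convolution output size: floor((size + 2*padding - kernel) / stride) + 1,
# the same expression the Cvt test applies per stage to height and width.
from math import floor

def conv_output_size(size: int, padding: int, kernel: int, stride: int) -> int:
    return floor((size + 2 * padding - kernel) / stride) + 1

assert conv_output_size(64, padding=2, kernel=7, stride=4) == 16
assert conv_output_size(16, padding=1, kernel=3, stride=2) == 8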
'''simple docstring'''


def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count positive integers that are n-digit numbers and also n-th powers."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
644
1
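A quick sanity check of the brute-force count above; 49 is the published answer to Project Euler problem 63 (stated here as an assumption, not taken from the row itself):

# Count n-digit integers that are also n-th powers, for the default bounds.
assert (
    sum(
        1
        for power in range(1, 22)
        for base in range(1, 10)
        if len(str(base**power)) == power
    )
    == 49
)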
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness snake_case_ : List[Any] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n" snake_case_ : Union[str, Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n" snake_case_ : Any = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n" snake_case_ : Union[str, Any] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n" snake_case_ : Tuple = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE." @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __a (datasets.Metric ): def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Sequence(datasets.Value('''string''' ) ), '''references''': datasets.Value('''string''' ), } ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , ) def UpperCAmelCase__ ( self : str , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str=[1, 10, 1_00] , __magic_name__ : Optional[Any]=4 , __magic_name__ : List[Any]=3.0 ) -> int: """simple docstring""" if os.getenv('''HF_ALLOW_CODE_EVAL''' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('''This metric is currently not supported on Windows.''' ) with ThreadPoolExecutor(max_workers=__magic_name__ ) as executor: UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : Union[str, Any] = Counter() UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Union[str, Any] = defaultdict(__magic_name__ ) for task_id, (candidates, test_case) in enumerate(zip(__magic_name__ , __magic_name__ ) ): for candidate in candidates: UpperCAmelCase_ : Tuple = candidate + '''\n''' + test_case UpperCAmelCase_ : int = (test_program, timeout, task_id, completion_id[task_id]) UpperCAmelCase_ : Optional[int] = executor.submit(__magic_name__ , *__magic_name__ ) futures.append(__magic_name__ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__magic_name__ ): UpperCAmelCase_ : Dict = future.result() results[result["task_id"]].append((result['''completion_id'''], result) ) UpperCAmelCase_ , UpperCAmelCase_ : Dict = [], [] for result in results.values(): result.sort() UpperCAmelCase_ : Dict = [r[1]['''passed'''] for r in result] total.append(len(__magic_name__ ) ) correct.append(sum(__magic_name__ ) ) UpperCAmelCase_ : str = np.array(__magic_name__ ) UpperCAmelCase_ : str = np.array(__magic_name__ ) UpperCAmelCase_ : List[str] = k UpperCAmelCase_ : Optional[Any] = {F"""pass@{k}""": estimate_pass_at_k(__magic_name__ , __magic_name__ , __magic_name__ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : List[str], SCREAMING_SNAKE_CASE__ : Dict, SCREAMING_SNAKE_CASE__ : Any ) -> Tuple: def estimator(SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1 ) ) if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ): UpperCAmelCase_ : List[Any] = itertools.repeat(SCREAMING_SNAKE_CASE__, len(SCREAMING_SNAKE_CASE__ ) ) else: assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) UpperCAmelCase_ : Dict = iter(SCREAMING_SNAKE_CASE__ ) return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ), int(SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )] )
644
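The estimator at the end of the metric above is the unbiased pass@k formula from the Codex paper; a minimal standalone sketch:

# pass@k = 1 - C(n-c, k) / C(n, k), computed stably as a running product.
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    # n: total samples per task, c: samples that passed, k: evaluation budget
    if n - c < k:
        return 1.0
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

assert abs(pass_at_k(n=10, c=5, k=1) - 0.5) < 1e-9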
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class __a (lowerCamelCase ): __a : int = "dandelin/vilt-b32-finetuned-vqa" __a : Any = ( "This is a tool that answers a question about an image. It takes an input named `image` which should be the " "image containing the information, as well as a `question` which should be the question in English. It " "returns a text that is the answer to the question." ) __a : Any = "image_qa" __a : str = AutoProcessor __a : Any = AutoModelForVisualQuestionAnswering __a : List[Any] = ["image", "text"] __a : int = ["text"] def __init__( self : Tuple , *__magic_name__ : Any , **__magic_name__ : Any ) -> Tuple: """simple docstring""" requires_backends(self , ['''vision'''] ) super().__init__(*__magic_name__ , **__magic_name__ ) def UpperCAmelCase__ ( self : Union[str, Any] , __magic_name__ : "Image" , __magic_name__ : str ) -> Tuple: """simple docstring""" return self.pre_processor(__magic_name__ , __magic_name__ , return_tensors='''pt''' ) def UpperCAmelCase__ ( self : Any , __magic_name__ : List[str] ) -> Optional[Any]: """simple docstring""" with torch.no_grad(): return self.model(**__magic_name__ ).logits def UpperCAmelCase__ ( self : int , __magic_name__ : int ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Dict = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
644
1
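A minimal sketch of the same ViLT question-answering flow using transformers directly, assuming the checkpoint named in the tool; the image path is illustrative:

import torch
from PIL import Image
from transformers import AutoModelForVisualQuestionAnswering, AutoProcessor

processor = AutoProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = AutoModelForVisualQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

inputs = processor(Image.open("photo.jpg"), "How many cats are there?", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # decoded answer string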
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ) -> Optional[Any]: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def UpperCAmelCase__ ( self : Optional[int] ) -> int: """simple docstring""" UpperCAmelCase_ : int = 1 UpperCAmelCase_ : Union[str, Any] = 3 UpperCAmelCase_ : Union[str, Any] = (32, 32) UpperCAmelCase_ : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__magic_name__ ) return image @property def UpperCAmelCase__ ( self : Tuple ) -> List[Any]: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def UpperCAmelCase__ ( self : Optional[Any] ) -> str: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def UpperCAmelCase__ ( self : List[str] ) -> Dict: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModel(__magic_name__ ) @property def UpperCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" def extract(*__magic_name__ : int , **__magic_name__ : Optional[Any] ): class __a : def __init__( self : Any ) -> Any: """simple docstring""" UpperCAmelCase_ : Optional[Any] = torch.ones([0] ) def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Tuple ) -> Dict: """simple docstring""" self.pixel_values.to(__magic_name__ ) return self return Out() return extract def UpperCAmelCase__ ( self : str ) -> int: """simple docstring""" UpperCAmelCase_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Any = self.dummy_cond_unet UpperCAmelCase_ : Any = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__magic_name__ , set_alpha_to_one=__magic_name__ , ) UpperCAmelCase_ : List[str] = self.dummy_vae UpperCAmelCase_ : Any = self.dummy_text_encoder UpperCAmelCase_ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Dict = StableDiffusionPipeline( unet=__magic_name__ , scheduler=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , 
safety_checker=__magic_name__ , feature_extractor=self.dummy_extractor , ) UpperCAmelCase_ : str = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ : List[str] = torch.Generator(device=__magic_name__ ).manual_seed(0 ) UpperCAmelCase_ : Tuple = sd_pipe([prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) UpperCAmelCase_ : List[Any] = output.images UpperCAmelCase_ : List[str] = torch.Generator(device=__magic_name__ ).manual_seed(0 ) UpperCAmelCase_ : List[Any] = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__magic_name__ , )[0] UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : Optional[Any] ) -> int: """simple docstring""" UpperCAmelCase_ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_ : Union[str, Any] = self.dummy_cond_unet UpperCAmelCase_ : Union[str, Any] = PNDMScheduler(skip_prk_steps=__magic_name__ ) UpperCAmelCase_ : Tuple = self.dummy_vae UpperCAmelCase_ : List[Any] = self.dummy_text_encoder UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk UpperCAmelCase_ : int = StableDiffusionPipeline( unet=__magic_name__ , scheduler=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=self.dummy_extractor , ) UpperCAmelCase_ : Union[str, Any] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : str = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ : str = torch.Generator(device=__magic_name__ ).manual_seed(0 ) UpperCAmelCase_ : List[str] = sd_pipe([prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) UpperCAmelCase_ : int = output.images UpperCAmelCase_ : str = torch.Generator(device=__magic_name__ ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__magic_name__ , )[0] UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ : str = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : int = StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=__magic_name__ ) assert isinstance(__magic_name__ , __magic_name__ ) assert isinstance(pipe.scheduler , 
__magic_name__ ) assert pipe.safety_checker is None UpperCAmelCase_ : Tuple = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Dict = StableDiffusionPipeline.from_pretrained(__magic_name__ ) # sanity check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase_ : int = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def UpperCAmelCase__ ( self : List[Any] ) -> int: """simple docstring""" UpperCAmelCase_ : str = self.dummy_cond_unet UpperCAmelCase_ : Optional[int] = PNDMScheduler(skip_prk_steps=__magic_name__ ) UpperCAmelCase_ : Optional[int] = self.dummy_vae UpperCAmelCase_ : List[str] = self.dummy_text_encoder UpperCAmelCase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 UpperCAmelCase_ : Tuple = unet.half() UpperCAmelCase_ : List[str] = vae.half() UpperCAmelCase_ : Union[str, Any] = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase_ : Union[str, Any] = StableDiffusionPipeline( unet=__magic_name__ , scheduler=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=self.dummy_extractor , ) UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = '''A painting of a squirrel eating a burger''' UpperCAmelCase_ : Any = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Optional[Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__magic_name__ ) UpperCAmelCase_ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : Optional[int] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Tuple = ( '''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle''' ''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with''' ''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and''' ''' children from bahnhof zoo, detailed ''' ) UpperCAmelCase_ : Dict = 40_03_66_03_46 UpperCAmelCase_ : Optional[int] = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : Dict = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 
0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : int ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__magic_name__ ) UpperCAmelCase_ : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase_ : List[Any] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = '''padme amidala taking a bath artwork, safe for work, no nudity''' UpperCAmelCase_ : Union[str, Any] = 27_34_97_17_55 UpperCAmelCase_ : str = 7 UpperCAmelCase_ : List[str] = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : Tuple = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) UpperCAmelCase_ : str = output.images UpperCAmelCase_ : int = image[0, -3:, -3:, -1] UpperCAmelCase_ : Dict = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 UpperCAmelCase_ : Optional[Any] = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : int = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCAmelCase_ : Optional[int] = output.images UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def UpperCAmelCase__ ( self : Optional[int] ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) UpperCAmelCase_ : Optional[Any] = sd_pipe.to(__magic_name__ ) sd_pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : str = ( '''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. 
c.''' ''' leyendecker''' ) UpperCAmelCase_ : Any = 10_44_35_52_34 UpperCAmelCase_ : Dict = 12 UpperCAmelCase_ : List[str] = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : int = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=0 , ) UpperCAmelCase_ : Union[str, Any] = output.images UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1] UpperCAmelCase_ : List[Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(__magic_name__ ) UpperCAmelCase_ : Any = sd_pipe( [prompt] , generator=__magic_name__ , guidance_scale=__magic_name__ , num_inference_steps=50 , output_type='''np''' , width=5_12 , height=5_12 , sld_guidance_scale=20_00 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) UpperCAmelCase_ : Tuple = output.images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[int] = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 5_12, 5_12, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
644
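A sketch of Safe Latent Diffusion inference as exercised by the nightly tests above; the checkpoint and sld_* argument names are copied from the tests, everything else is assumed:

import torch
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
image = pipe(
    "portrait photo of an astronaut",
    generator=torch.manual_seed(0),
    num_inference_steps=50,
    sld_guidance_scale=2000,  # 0 disables the safety guidance, per the tests
    sld_warmup_steps=7,
).images[0]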
'''simple docstring''' from collections.abc import Iterable from typing import Any class __a : def __init__( self : Optional[Any] , __magic_name__ : int | None = None ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[str] = value UpperCAmelCase_ : Node | None = None # Added in order to delete a node easier UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def __repr__( self : List[str] ) -> str: """simple docstring""" from pprint import pformat if self.left is None and self.right is None: return str(self.value ) return pformat({F"""{self.value}""": (self.left, self.right)} , indent=1 ) class __a : def __init__( self : int , __magic_name__ : Node | None = None ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = root def __str__( self : Any ) -> str: """simple docstring""" return str(self.root ) def UpperCAmelCase__ ( self : Any , __magic_name__ : Node , __magic_name__ : Node | None ) -> None: """simple docstring""" if new_children is not None: # reset its kids UpperCAmelCase_ : Dict = node.parent if node.parent is not None: # reset its parent if self.is_right(__magic_name__ ): # If it is the right children UpperCAmelCase_ : Optional[Any] = new_children else: UpperCAmelCase_ : Optional[int] = new_children else: UpperCAmelCase_ : List[str] = new_children def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Node ) -> bool: """simple docstring""" if node.parent and node.parent.right: return node == node.parent.right return False def UpperCAmelCase__ ( self : Union[str, Any] ) -> bool: """simple docstring""" return self.root is None def UpperCAmelCase__ ( self : Any , __magic_name__ : str ) -> None: """simple docstring""" UpperCAmelCase_ : Tuple = Node(__magic_name__ ) # create a new Node if self.empty(): # if Tree is empty UpperCAmelCase_ : List[Any] = new_node # set its root else: # Tree is not empty UpperCAmelCase_ : str = self.root # from root if parent_node is None: return while True: # While we don't get to a leaf if value < parent_node.value: # We go left if parent_node.left is None: UpperCAmelCase_ : Union[str, Any] = new_node # We insert the new node in a leaf break else: UpperCAmelCase_ : List[Any] = parent_node.left else: if parent_node.right is None: UpperCAmelCase_ : List[Any] = new_node break else: UpperCAmelCase_ : Union[str, Any] = parent_node.right UpperCAmelCase_ : Union[str, Any] = parent_node def UpperCAmelCase__ ( self : Optional[Any] , *__magic_name__ : List[str] ) -> None: """simple docstring""" for value in values: self.__insert(__magic_name__ ) def UpperCAmelCase__ ( self : Dict , __magic_name__ : int ) -> Node | None: """simple docstring""" if self.empty(): raise IndexError('''Warning: Tree is empty! 
please use another.''' ) else: UpperCAmelCase_ : str = self.root # use lazy evaluation here to avoid NoneType Attribute error while node is not None and node.value is not value: UpperCAmelCase_ : List[str] = node.left if value < node.value else node.right return node def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: if self.root is None: return None UpperCAmelCase_ : Dict = self.root if not self.empty(): while node.right is not None: UpperCAmelCase_ : Any = node.right return node def UpperCAmelCase__ ( self : Dict , __magic_name__ : Node | None = None ) -> Node | None: """simple docstring""" if node is None: UpperCAmelCase_ : Optional[int] = self.root if self.root is None: return None if not self.empty(): UpperCAmelCase_ : Union[str, Any] = self.root while node.left is not None: UpperCAmelCase_ : Dict = node.left return node def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int ) -> None: """simple docstring""" UpperCAmelCase_ : List[str] = self.search(__magic_name__ ) # Look for the node with that label if node is not None: if node.left is None and node.right is None: # If it has no children self.__reassign_nodes(__magic_name__ , __magic_name__ ) elif node.left is None: # Has only right children self.__reassign_nodes(__magic_name__ , node.right ) elif node.right is None: # Has only left children self.__reassign_nodes(__magic_name__ , node.left ) else: UpperCAmelCase_ : List[str] = self.get_max( node.left ) # Gets the max value of the left branch self.remove(tmp_node.value ) # type: ignore UpperCAmelCase_ : Optional[int] = ( tmp_node.value # type: ignore ) # Assigns the value to the node to delete and keep tree structure def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : Node | None ) -> Iterable: """simple docstring""" if node is not None: yield node # Preorder Traversal yield from self.preorder_traverse(node.left ) yield from self.preorder_traverse(node.right ) def UpperCAmelCase__ ( self : List[Any] , __magic_name__ : List[Any]=None ) -> Any: """simple docstring""" if traversal_function is None: return self.preorder_traverse(self.root ) else: return traversal_function(self.root ) def UpperCAmelCase__ ( self : Optional[int] , __magic_name__ : list , __magic_name__ : Node | None ) -> None: """simple docstring""" if node: self.inorder(__magic_name__ , node.left ) arr.append(node.value ) self.inorder(__magic_name__ , node.right ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : int , __magic_name__ : Node ) -> int: """simple docstring""" UpperCAmelCase_ : list[int] = [] self.inorder(__magic_name__ , __magic_name__ ) # append all values to list using inorder traversal return arr[k - 1] def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Node | None ) -> list[Node]: UpperCAmelCase_ : Any = [] if curr_node is not None: UpperCAmelCase_ : Any = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node] return node_list def lowerCamelCase_ ( ) -> None: UpperCAmelCase_ : str = (8, 3, 6, 1, 10, 14, 13, 4, 7) UpperCAmelCase_ : Tuple = BinarySearchTree() for i in testlist: t.insert(SCREAMING_SNAKE_CASE__ ) # Prints all the elements of the list in order traversal print(SCREAMING_SNAKE_CASE__ ) if t.search(6 ) is not None: print('''The value 6 exists''' ) else: print('''The value 6 doesn\'t exist''' ) if t.search(-1 ) is not None: print('''The value -1 exists''' ) else: print('''The value -1 doesn\'t exist''' ) if not t.empty(): print('''Max Value: ''', t.get_max().value ) # type: ignore 
print('''Min Value: ''', t.get_min().value ) # type: ignore for i in testlist: t.remove(SCREAMING_SNAKE_CASE__ ) print(SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
644
1
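The kth_smallest helper in the tree above relies on the BST invariant that inorder traversal visits values in ascending order; a tiny standalone illustration:

# Inorder of a BST is sorted, so the k-th smallest value is index k-1.
values = [8, 3, 6, 1, 10, 14, 13, 4, 7]  # same insertion order as the demo
inorder = sorted(values)  # what the tree's inorder traversal yields
assert inorder[3 - 1] == 4  # the 3rd smallest element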
'''simple docstring'''


def lowerCamelCase_(input_str: str) -> str:
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
644
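The one-liner above reverses word order while collapsing whitespace runs, since str.split() with no separator drops empty fields; a sketch with two illustrative doctests:

def reverse_words(sentence: str) -> str:
    """
    >>> reverse_words("I love Python")
    'Python love I'
    >>> reverse_words("  extra   spaces  ")
    'spaces extra'
    """
    return " ".join(sentence.split()[::-1])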
'''simple docstring'''
import sys
import turtle


def get_mid(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(
    vertex1: tuple[float, float],
    vertex2: tuple[float, float],
    vertex3: tuple[float, float],
    depth: int,
) -> None:
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
644
1
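Each recursive call above replaces a triangle with three half-scale copies anchored at the edge midpoints, so depth d ends with 3**d smallest triangles; a turtle-free sketch of the geometry:

def midpoint(p1: tuple[float, float], p2: tuple[float, float]) -> tuple[float, float]:
    return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)

def smallest_triangle_count(depth: int) -> int:
    return 3**depth  # three recursive calls per level

assert midpoint((0, 0), (2, 4)) == (1.0, 2.0)
assert smallest_triangle_count(3) == 27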
'''simple docstring'''


def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - ord("a")] = True
        elif char.isupper():
            flag[ord(char) - ord("A")] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
644
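The fastest variant above reduces to a set-inclusion test against the alphabet; a compact equivalent sketch:

import string

def is_pangram_set(sentence: str) -> bool:
    return set(string.ascii_lowercase) <= set(sentence.lower())

assert is_pangram_set("The quick brown fox jumps over the lazy dog")
assert not is_pangram_set("Hello world")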
'''simple docstring''' import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device snake_case_ : List[str] = False class __a (unittest.TestCase ): pass @nightly @require_torch_gpu class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> str: """simple docstring""" # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" UpperCAmelCase_ : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : List[Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__magic_name__ ) UpperCAmelCase_ : Optional[int] = VersatileDiffusionPipeline.from_pretrained(__magic_name__ , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Any = generator.manual_seed(0 ) UpperCAmelCase_ : Dict = pipe.dual_guided( prompt='''first prompt''' , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def UpperCAmelCase__ ( self : str ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : str = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = '''cyberpunk 2077''' UpperCAmelCase_ : Union[str, Any] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe.dual_guided( prompt=__magic_name__ , image=__magic_name__ , text_to_image_strength=0.7_5 , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images UpperCAmelCase_ : List[str] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Union[str, Any] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = '''A painting of a squirrel eating a burger ''' UpperCAmelCase_ : Optional[int] = torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = pipe.text_to_image( prompt=__magic_name__ , generator=__magic_name__ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : Any = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 
0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 UpperCAmelCase_ : Tuple = pipe.image_variation(__magic_name__ , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1] assert image.shape == (1, 5_12, 5_12, 3) UpperCAmelCase_ : List[str] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
644
1
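A sketch of the dual-guided Versatile Diffusion call the tests above exercise; argument names mirror the tests, while device placement and output handling are assumptions:

import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image

pipe = VersatileDiffusionPipeline.from_pretrained(
    "shi-labs/versatile-diffusion", torch_dtype=torch.float16
).to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
)
images = pipe.dual_guided(
    prompt="cyberpunk 2077",
    image=init_image,
    text_to_image_strength=0.75,
    generator=torch.manual_seed(0),
    guidance_scale=7.5,
    num_inference_steps=50,
    output_type="numpy",
).images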
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : str, SCREAMING_SNAKE_CASE__ : Any, SCREAMING_SNAKE_CASE__ : Union[str, Any]=1E-12 ) -> Optional[Any]: UpperCAmelCase_ : Any = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(SCREAMING_SNAKE_CASE__, axis=1 ), a_min=SCREAMING_SNAKE_CASE__ ) ).T UpperCAmelCase_ : Optional[int] = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(SCREAMING_SNAKE_CASE__, axis=1 ), a_min=SCREAMING_SNAKE_CASE__ ) ).T return jnp.matmul(SCREAMING_SNAKE_CASE__, norm_emb_a.T ) class __a (nn.Module ): __a : CLIPConfig __a : jnp.dtype = jnp.floataa def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = FlaxCLIPVisionModule(self.config.vision_config ) UpperCAmelCase_ : Tuple = nn.Dense(self.config.projection_dim , use_bias=__magic_name__ , dtype=self.dtype ) UpperCAmelCase_ : Optional[Any] = self.param('''concept_embeds''' , jax.nn.initializers.ones , (17, self.config.projection_dim) ) UpperCAmelCase_ : Union[str, Any] = self.param( '''special_care_embeds''' , jax.nn.initializers.ones , (3, self.config.projection_dim) ) UpperCAmelCase_ : Tuple = self.param('''concept_embeds_weights''' , jax.nn.initializers.ones , (17,) ) UpperCAmelCase_ : Tuple = self.param('''special_care_embeds_weights''' , jax.nn.initializers.ones , (3,) ) def __call__( self : Any , __magic_name__ : Tuple ) -> Optional[int]: """simple docstring""" UpperCAmelCase_ : Dict = self.vision_model(__magic_name__ )[1] UpperCAmelCase_ : Optional[Any] = self.visual_projection(__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = jax_cosine_distance(__magic_name__ , self.special_care_embeds ) UpperCAmelCase_ : Optional[int] = jax_cosine_distance(__magic_name__ , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs UpperCAmelCase_ : Any = 0.0 UpperCAmelCase_ : Any = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment UpperCAmelCase_ : List[Any] = jnp.round(__magic_name__ , 3 ) UpperCAmelCase_ : Optional[Any] = jnp.any(special_scores > 0 , axis=1 , keepdims=__magic_name__ ) # Use a lower threshold if an image has any special care concept UpperCAmelCase_ : Union[str, Any] = is_special_care * 0.0_1 UpperCAmelCase_ : str = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment UpperCAmelCase_ : Optional[Any] = jnp.round(__magic_name__ , 3 ) UpperCAmelCase_ : Optional[Any] = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class __a (lowerCamelCase ): __a : str = CLIPConfig __a : Optional[Any] = "clip_input" __a : List[str] = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Optional[Any] , __magic_name__ : CLIPConfig , __magic_name__ : Optional[Tuple] = None , __magic_name__ : int = 0 , __magic_name__ : jnp.dtype = jnp.floataa , __magic_name__ : bool = True , **__magic_name__ : int , ) -> Any: """simple docstring""" if input_shape is None: UpperCAmelCase_ : List[str] = (1, 2_24, 2_24, 3) UpperCAmelCase_ : Dict = self.module_class(config=__magic_name__ , dtype=__magic_name__ , **__magic_name__ ) super().__init__(__magic_name__ , __magic_name__ , input_shape=__magic_name__ , seed=__magic_name__ , dtype=__magic_name__ , 
_do_init=_do_init ) def UpperCAmelCase__ ( self : Tuple , __magic_name__ : jax.random.KeyArray , __magic_name__ : Tuple , __magic_name__ : FrozenDict = None ) -> FrozenDict: """simple docstring""" # init input tensor UpperCAmelCase_ : Dict = jax.random.normal(__magic_name__ , __magic_name__ ) UpperCAmelCase_ , UpperCAmelCase_ : str = jax.random.split(__magic_name__ ) UpperCAmelCase_ : Union[str, Any] = {'''params''': params_rng, '''dropout''': dropout_rng} UpperCAmelCase_ : Optional[Any] = self.module.init(__magic_name__ , __magic_name__ )['''params'''] return random_params def __call__( self : Optional[Any] , __magic_name__ : Any , __magic_name__ : dict = None , ) -> int: """simple docstring""" UpperCAmelCase_ : Optional[Any] = jnp.transpose(__magic_name__ , (0, 2, 3, 1) ) return self.module.apply( {'''params''': params or self.params} , jnp.array(__magic_name__ , dtype=jnp.floataa ) , rngs={} , )
644
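The helper at the top of the safety-checker module above is a normalized dot product (cosine similarity) between two embedding batches; a standalone sketch:

import jax.numpy as jnp

def cosine_similarity(emb_1: jnp.ndarray, emb_2: jnp.ndarray, eps: float = 1e-12) -> jnp.ndarray:
    # L2-normalize each row, clipping tiny norms for numerical safety,
    # then compare every row of emb_1 against every row of emb_2.
    norm_1 = emb_1 / jnp.clip(jnp.linalg.norm(emb_1, axis=1, keepdims=True), a_min=eps)
    norm_2 = emb_2 / jnp.clip(jnp.linalg.norm(emb_2, axis=1, keepdims=True), a_min=eps)
    return norm_1 @ norm_2.T  # shape: (len(emb_1), len(emb_2))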
'''simple docstring''' snake_case_ : int = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
644
1
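Tables like the one above map package names to full pip requirement strings; downstream build scripts typically select from them by name. A hypothetical helper (deps_list is not defined in the row above):

deps = {"torch": "torch>=1.4", "transformers": "transformers>=4.25.1"}  # excerpt

def deps_list(*pkgs: str) -> list[str]:
    return [deps[pkg] for pkg in pkgs]

assert deps_list("torch", "transformers") == ["torch>=1.4", "transformers>=4.25.1"]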
'''simple docstring''' from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class __a (lowerCamelCase ): __a : Union[str, Any] = ["image_processor"] __a : List[Any] = "SamImageProcessor" def __init__( self : Optional[int] , __magic_name__ : Tuple ) -> Optional[int]: """simple docstring""" super().__init__(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = self.image_processor UpperCAmelCase_ : Optional[int] = -10 UpperCAmelCase_ : Any = self.image_processor.size['''longest_edge'''] def __call__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : List[str]=None , __magic_name__ : int=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : Dict , ) -> BatchEncoding: """simple docstring""" UpperCAmelCase_ : Union[str, Any] = self.image_processor( __magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , ) # pop arguments that are not used in the foward but used nevertheless UpperCAmelCase_ : Union[str, Any] = encoding_image_processor['''original_sizes'''] if hasattr(__magic_name__ , '''numpy''' ): # Checks if Torch or TF tensor UpperCAmelCase_ : Optional[Any] = original_sizes.numpy() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = self._check_and_preprocess_points( input_points=__magic_name__ , input_labels=__magic_name__ , input_boxes=__magic_name__ , ) UpperCAmelCase_ : List[Any] = self._normalize_and_convert( __magic_name__ , __magic_name__ , input_points=__magic_name__ , input_labels=__magic_name__ , input_boxes=__magic_name__ , return_tensors=__magic_name__ , ) return encoding_image_processor def UpperCAmelCase__ ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : List[str]=None , __magic_name__ : List[str]=None , __magic_name__ : Optional[int]=None , __magic_name__ : List[Any]="pt" , ) -> Tuple: """simple docstring""" if input_points is not None: if len(__magic_name__ ) != len(__magic_name__ ): UpperCAmelCase_ : Dict = [ self._normalize_coordinates(self.target_size , __magic_name__ , original_sizes[0] ) for point in input_points ] else: UpperCAmelCase_ : int = [ self._normalize_coordinates(self.target_size , __magic_name__ , __magic_name__ ) for point, original_size in zip(__magic_name__ , __magic_name__ ) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points ): if input_labels is not None: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self._pad_points_and_labels(__magic_name__ , __magic_name__ ) UpperCAmelCase_ : List[Any] = np.array(__magic_name__ ) if input_labels is not None: UpperCAmelCase_ : List[str] = np.array(__magic_name__ ) if input_boxes is not None: if len(__magic_name__ ) != len(__magic_name__ ): UpperCAmelCase_ : List[Any] = [ self._normalize_coordinates(self.target_size , __magic_name__ , original_sizes[0] , is_bounding_box=__magic_name__ ) for box in input_boxes ] else: UpperCAmelCase_ : Any = [ self._normalize_coordinates(self.target_size , __magic_name__ , __magic_name__ , is_bounding_box=__magic_name__ ) for box, original_size in zip(__magic_name__ , __magic_name__ ) ] UpperCAmelCase_ : int = np.array(__magic_name__ ) if input_boxes is not None: if return_tensors == "pt": 
UpperCAmelCase_ : List[str] = torch.from_numpy(__magic_name__ ) # boxes batch size of 1 by default UpperCAmelCase_ : Tuple = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes elif return_tensors == "tf": UpperCAmelCase_ : Union[str, Any] = tf.convert_to_tensor(__magic_name__ ) # boxes batch size of 1 by default UpperCAmelCase_ : Tuple = tf.expand_dims(__magic_name__ , 1 ) if len(input_boxes.shape ) != 3 else input_boxes encoding_image_processor.update({'''input_boxes''': input_boxes} ) if input_points is not None: if return_tensors == "pt": UpperCAmelCase_ : List[Any] = torch.from_numpy(__magic_name__ ) # point batch size of 1 by default UpperCAmelCase_ : Tuple = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points elif return_tensors == "tf": UpperCAmelCase_ : List[str] = tf.convert_to_tensor(__magic_name__ ) # point batch size of 1 by default UpperCAmelCase_ : str = tf.expand_dims(__magic_name__ , 1 ) if len(input_points.shape ) != 4 else input_points encoding_image_processor.update({'''input_points''': input_points} ) if input_labels is not None: if return_tensors == "pt": UpperCAmelCase_ : Dict = torch.from_numpy(__magic_name__ ) # point batch size of 1 by default UpperCAmelCase_ : Any = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels elif return_tensors == "tf": UpperCAmelCase_ : int = tf.convert_to_tensor(__magic_name__ ) # point batch size of 1 by default UpperCAmelCase_ : Any = tf.expand_dims(__magic_name__ , 1 ) if len(input_labels.shape ) != 3 else input_labels encoding_image_processor.update({'''input_labels''': input_labels} ) return encoding_image_processor def UpperCAmelCase__ ( self : int , __magic_name__ : Any , __magic_name__ : Dict ) -> Dict: """simple docstring""" UpperCAmelCase_ : str = max([point.shape[0] for point in input_points] ) UpperCAmelCase_ : Any = [] for i, point in enumerate(__magic_name__ ): if point.shape[0] != expected_nb_points: UpperCAmelCase_ : Optional[Any] = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 ) UpperCAmelCase_ : str = np.append(input_labels[i] , [self.point_pad_value] ) processed_input_points.append(__magic_name__ ) UpperCAmelCase_ : Optional[Any] = processed_input_points return input_points, input_labels def UpperCAmelCase__ ( self : str , __magic_name__ : int , __magic_name__ : np.ndarray , __magic_name__ : Any , __magic_name__ : Optional[int]=False ) -> np.ndarray: """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : Any = original_size UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.image_processor._get_preprocess_shape(__magic_name__ , longest_edge=__magic_name__ ) UpperCAmelCase_ : Optional[int] = deepcopy(__magic_name__ ).astype(__magic_name__ ) if is_bounding_box: UpperCAmelCase_ : Dict = coords.reshape(-1 , 2 , 2 ) UpperCAmelCase_ : Optional[Any] = coords[..., 0] * (new_w / old_w) UpperCAmelCase_ : str = coords[..., 1] * (new_h / old_h) if is_bounding_box: UpperCAmelCase_ : List[str] = coords.reshape(-1 , 4 ) return coords def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Any=None , __magic_name__ : Optional[Any]=None , __magic_name__ : Tuple=None , ) -> Optional[int]: """simple docstring""" if input_points is not None: if hasattr(__magic_name__ , '''numpy''' ): # Checks for TF or Torch tensor UpperCAmelCase_ : Union[str, Any] = input_points.numpy().tolist() if not isinstance(__magic_name__ , __magic_name__ ) or not isinstance(input_points[0] , __magic_name__ ): raise 
ValueError('''Input points must be a list of list of floating points.''' ) UpperCAmelCase_ : int = [np.array(__magic_name__ ) for input_point in input_points] else: UpperCAmelCase_ : List[str] = None if input_labels is not None: if hasattr(__magic_name__ , '''numpy''' ): UpperCAmelCase_ : Optional[int] = input_labels.numpy().tolist() if not isinstance(__magic_name__ , __magic_name__ ) or not isinstance(input_labels[0] , __magic_name__ ): raise ValueError('''Input labels must be a list of list integers.''' ) UpperCAmelCase_ : Union[str, Any] = [np.array(__magic_name__ ) for label in input_labels] else: UpperCAmelCase_ : int = None if input_boxes is not None: if hasattr(__magic_name__ , '''numpy''' ): UpperCAmelCase_ : int = input_boxes.numpy().tolist() if ( not isinstance(__magic_name__ , __magic_name__ ) or not isinstance(input_boxes[0] , __magic_name__ ) or not isinstance(input_boxes[0][0] , __magic_name__ ) ): raise ValueError('''Input boxes must be a list of list of list of floating points.''' ) UpperCAmelCase_ : Any = [np.array(__magic_name__ ).astype(np.floataa ) for box in input_boxes] else: UpperCAmelCase_ : int = None return input_points, input_labels, input_boxes @property def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[str]: """simple docstring""" UpperCAmelCase_ : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(__magic_name__ ) ) def UpperCAmelCase__ ( self : Any , *__magic_name__ : Optional[int] , **__magic_name__ : List[str] ) -> List[Any]: """simple docstring""" return self.image_processor.post_process_masks(*__magic_name__ , **__magic_name__ )
644
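The _normalize_coordinates method above rescales prompt points from original-image pixels into the resized frame the model sees; a standalone sketch of the per-axis scaling (example sizes are illustrative):

def rescale_point(
    x: float, y: float, old_hw: tuple[int, int], new_hw: tuple[int, int]
) -> tuple[float, float]:
    old_h, old_w = old_hw
    new_h, new_w = new_hw
    return x * (new_w / old_w), y * (new_h / old_h)

assert rescale_point(100, 50, old_hw=(200, 400), new_hw=(1024, 2048)) == (512.0, 256.0)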
'''simple docstring''' import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __a (unittest.TestCase ): @property def UpperCAmelCase__ ( self : Dict ) -> str: """simple docstring""" torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , ) return model def UpperCAmelCase__ ( self : Dict ) -> Tuple: """simple docstring""" UpperCAmelCase_ : List[Any] = self.dummy_uncond_unet UpperCAmelCase_ : Dict = KarrasVeScheduler() UpperCAmelCase_ : Union[str, Any] = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Tuple = torch.manual_seed(0 ) UpperCAmelCase_ : str = pipe(num_inference_steps=2 , generator=__magic_name__ , output_type='''numpy''' , return_dict=__magic_name__ )[0] UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase_ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) UpperCAmelCase_ : Dict = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __a (unittest.TestCase ): def UpperCAmelCase__ ( self : int ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase_ : List[str] = '''google/ncsnpp-celebahq-256''' UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained(__magic_name__ ) UpperCAmelCase_ : List[Any] = KarrasVeScheduler() UpperCAmelCase_ : Any = KarrasVePipeline(unet=__magic_name__ , scheduler=__magic_name__ ) pipe.to(__magic_name__ ) pipe.set_progress_bar_config(disable=__magic_name__ ) UpperCAmelCase_ : Dict = torch.manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe(num_inference_steps=20 , generator=__magic_name__ , output_type='''numpy''' ).images UpperCAmelCase_ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 2_56, 2_56, 3) UpperCAmelCase_ : Optional[Any] = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
644
1
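The first sample in this row validates prompt inputs for a segmentation processor: points, labels, and boxes must arrive as nested Python lists (tensors are first converted via `.numpy().tolist()`) before being turned into numpy arrays. Below is a minimal sketch of the expected nesting; the coordinate values are illustrative and not taken from the sample itself.

import numpy as np

# One image, one (x, y) point with its foreground label, and one box.
input_points = [[[450.0, 600.0]]]                # [image][point][x, y]
input_labels = [[1]]                             # [image][point]
input_boxes = [[[75.0, 275.0, 1725.0, 850.0]]]   # [image][box][x1, y1, x2, y2]

# The same shape checks the sample performs before converting to arrays.
assert isinstance(input_points, list) and isinstance(input_points[0], list)
assert (
    isinstance(input_boxes, list)
    and isinstance(input_boxes[0], list)
    and isinstance(input_boxes[0][0], list)
)

points = [np.array(p) for p in input_points]
boxes = [np.array(b).astype(np.float32) for b in input_boxes]
print(points[0].shape, boxes[0].dtype)  # (1, 2) float32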
"""Pytest fixtures for tests that talk to the Hugging Face Hub CI endpoint."""
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
644
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
644
1
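A hypothetical usage sketch for the transcriber tool above. The import path and the silent dummy waveform are assumptions for illustration: the class lives under the transformers agents tooling in recent versions, and a real 16 kHz recording would replace the zeros.

import numpy as np

from transformers.tools import SpeechToTextTool  # import path assumed; adjust to your transformers version

transcriber = SpeechToTextTool()
audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz (placeholder input)
print(transcriber(audio))  # downloads openai/whisper-base on first use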
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
644
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the greatest common divisor (GCD) recursively via Euclid's algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the GCD iteratively via Euclid's algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    """Read two integers from the user and print their GCD."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
644
1
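A quick consistency check, not part of the original file, assuming both functions above are in scope: the recursive and iterative implementations should agree, including on zero and negative inputs, since both reduce via Euclid's algorithm and return an absolute value.

for a, b in [(3, 5), (6, 3), (0, 7), (-12, 18), (121, 11)]:
    assert greatest_common_divisor(a, b) == gcd_by_iterative(a, b)
print(greatest_common_divisor(-12, 18))  # 6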
def simplify(current_set: list[list]) -> list[list]:
    """Eliminate the first column of an augmented matrix, recursing on the remainder."""
    # Divide each row by the magnitude of its first term --> creating a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in the form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve n linear equations given as n rows of n+1 numbers (coefficients plus constant)."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
644
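An optional cross-check of the Gaussian-elimination solver above against numpy, assuming numpy is installed; the 5-equation demo system has the closed-form solution [-1, 0, 1, 2, 3].

import numpy as np

eq = [
    [2, 1, 1, 1, 1, 4],
    [1, 2, 1, 1, 1, 5],
    [1, 1, 2, 1, 1, 6],
    [1, 1, 1, 2, 1, 7],
    [1, 1, 1, 1, 2, 8],
]
a = np.array([row[:-1] for row in eq], dtype=float)  # coefficient matrix
b = np.array([row[-1] for row in eq], dtype=float)   # constants column
print(np.linalg.solve(a, b))  # [-1.  0.  1.  2.  3.], matching solve_simultaneous(eq)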
"""Testing suite for the PyTorch LiLT model."""
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
644
1
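The LiLT tester above makes random bounding boxes "legal" with an element-wise swap so that x2 >= x1 and y2 >= y1. A vectorized sketch of the same fix-up (my formulation, not from the test file):

import torch

bbox = torch.randint(0, 1000, (2, 4, 4))  # (batch, seq_len, 4), as in the tester
x1, y1, x2, y2 = bbox.unbind(-1)
bbox_fixed = torch.stack(
    [torch.minimum(x1, x2), torch.minimum(y1, y2), torch.maximum(x1, x2), torch.maximum(y1, y2)],
    dim=-1,
)
# Same invariant the tester's double loop enforces:
assert (bbox_fixed[..., 2] >= bbox_fixed[..., 0]).all()
assert (bbox_fixed[..., 3] >= bbox_fixed[..., 1]).all()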