code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" from ...utils import logging from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel from .configuration_mta import MTaConfig lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = 'T5Config' class _lowerCAmelCase ( __UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[int] = 'mt5' SCREAMING_SNAKE_CASE_: int = MTaConfig class _lowerCAmelCase ( __UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Dict = 'mt5' SCREAMING_SNAKE_CASE_: List[str] = MTaConfig class _lowerCAmelCase ( __UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Optional[Any] = 'mt5' SCREAMING_SNAKE_CASE_: int = MTaConfig
621
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'caidas/swin2sr-classicalsr-x2-64': ( 'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json' ), } class _lowerCAmelCase ( __UpperCAmelCase ): SCREAMING_SNAKE_CASE_: Any = 'swin2sr' SCREAMING_SNAKE_CASE_: Dict = { 'hidden_size': 'embed_dim', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self , lowerCAmelCase_=6_4 , lowerCAmelCase_=1 , lowerCAmelCase_=3 , lowerCAmelCase_=1_8_0 , lowerCAmelCase_=[6, 6, 6, 6, 6, 6] , lowerCAmelCase_=[6, 6, 6, 6, 6, 6] , lowerCAmelCase_=8 , lowerCAmelCase_=2.0 , lowerCAmelCase_=True , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.1 , lowerCAmelCase_="gelu" , lowerCAmelCase_=False , lowerCAmelCase_=0.02 , lowerCAmelCase_=1e-5 , lowerCAmelCase_=2 , lowerCAmelCase_=1.0 , lowerCAmelCase_="1conv" , lowerCAmelCase_="pixelshuffle" , **lowerCAmelCase_ , ) -> Dict: super().__init__(**lowerCAmelCase_ ) _SCREAMING_SNAKE_CASE : Optional[Any] = image_size _SCREAMING_SNAKE_CASE : Optional[Any] = patch_size _SCREAMING_SNAKE_CASE : str = num_channels _SCREAMING_SNAKE_CASE : Any = embed_dim _SCREAMING_SNAKE_CASE : Optional[int] = depths _SCREAMING_SNAKE_CASE : List[Any] = len(lowerCAmelCase_ ) _SCREAMING_SNAKE_CASE : Tuple = num_heads _SCREAMING_SNAKE_CASE : Optional[Any] = window_size _SCREAMING_SNAKE_CASE : List[str] = mlp_ratio _SCREAMING_SNAKE_CASE : List[str] = qkv_bias _SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate _SCREAMING_SNAKE_CASE : int = hidden_act _SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings _SCREAMING_SNAKE_CASE : str = layer_norm_eps _SCREAMING_SNAKE_CASE : List[Any] = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = upscale 
_SCREAMING_SNAKE_CASE : str = img_range _SCREAMING_SNAKE_CASE : Any = resi_connection _SCREAMING_SNAKE_CASE : Tuple = upsampler
621
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A = logging.get_logger(__name__) A = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class __lowercase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = '''resnet''' __lowerCAmelCase = ['''basic''', '''bottleneck'''] def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=64 , _UpperCAmelCase=[256, 512, 1024, 2048] , _UpperCAmelCase=[3, 4, 6, 3] , _UpperCAmelCase="bottleneck" , _UpperCAmelCase="relu" , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ): super().__init__(**_UpperCAmelCase ) if layer_type not in self.layer_types: raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" ) __a : Any = num_channels __a : Optional[Any] = embedding_size __a : Union[str, Any] = hidden_sizes __a : List[Any] = depths __a : Any = layer_type __a : Optional[int] = hidden_act __a : Dict = downsample_in_first_stage __a : str = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(_UpperCAmelCase ) + 1 )] __a : Optional[int] = get_aligned_output_features_output_indices( out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names ) class __lowercase ( _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = version.parse('''1.11''' ) @property def _lowerCamelCase ( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _lowerCamelCase ( self ): return 1e-3
702
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A = { '''configuration_jukebox''': [ '''JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''JukeboxConfig''', '''JukeboxPriorConfig''', '''JukeboxVQVAEConfig''', ], '''tokenization_jukebox''': ['''JukeboxTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ '''JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST''', '''JukeboxModel''', '''JukeboxPreTrainedModel''', '''JukeboxVQVAE''', '''JukeboxPrior''', ] if TYPE_CHECKING: from .configuration_jukebox import ( JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig, ) from .tokenization_jukebox import JukeboxTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_jukebox import ( JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST, JukeboxModel, JukeboxPreTrainedModel, JukeboxPrior, JukeboxVQVAE, ) else: import sys A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
101
0
from __future__ import annotations def a ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' # Checks if the entire collection has been sorted if len(A__ ) <= 1 or n <= 1: return insert_next(A__ , n - 1 ) rec_insertion_sort(A__ , n - 1 ) def a ( lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' # Checks order between adjacent elements if index >= len(A__ ) or collection[index - 1] <= collection[index]: return # Swaps adjacent elements since they are not in ascending order lowercase__ = ( collection[index], collection[index - 1], ) insert_next(A__ , index + 1 ) if __name__ == "__main__": A__ : List[Any] = input('Enter integers separated by spaces: ') A__ : list[int] = [int(num) for num in numbers.split()] rec_insertion_sort(number_list, len(number_list)) print(number_list)
183
"""simple docstring""" import math from typing import Optional import numpy as np from ...configuration_utils import PretrainedConfig from ...utils import logging a_ : int = logging.get_logger(__name__) a_ : Union[str, Any] = { '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''', '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''', } class __lowercase( lowercase__ ): '''simple docstring''' __a : Any = 'encodec' def __init__( self , __a=[1.5, 3.0, 6.0, 12.0, 24.0] , __a=24000 , __a=1 , __a=False , __a=None , __a=None , __a=128 , __a=32 , __a=1 , __a=[8, 5, 4, 2] , __a="weight_norm" , __a=7 , __a=7 , __a=3 , __a=2 , __a=True , __a="reflect" , __a=2 , __a=2 , __a=1.0 , __a=1024 , __a=None , __a=True , **__a , ): __lowerCamelCase : Optional[int] = target_bandwidths __lowerCamelCase : Dict = sampling_rate __lowerCamelCase : Tuple = audio_channels __lowerCamelCase : List[Any] = normalize __lowerCamelCase : List[str] = chunk_length_s __lowerCamelCase : Optional[int] = overlap __lowerCamelCase : List[str] = hidden_size __lowerCamelCase : Tuple = num_filters __lowerCamelCase : Optional[Any] = num_residual_layers __lowerCamelCase : List[Any] = upsampling_ratios __lowerCamelCase : int = norm_type __lowerCamelCase : str = kernel_size __lowerCamelCase : Tuple = last_kernel_size __lowerCamelCase : str = residual_kernel_size __lowerCamelCase : Tuple = dilation_growth_rate __lowerCamelCase : Any = use_causal_conv __lowerCamelCase : str = pad_mode __lowerCamelCase : List[str] = compress __lowerCamelCase : int = num_lstm_layers __lowerCamelCase : str = trim_right_ratio __lowerCamelCase : Optional[int] = codebook_size __lowerCamelCase : Any = codebook_dim if codebook_dim is not None else hidden_size __lowerCamelCase : Tuple = use_conv_shortcut if self.norm_type not in ["weight_norm", "time_group_norm"]: raise ValueError( f'''self.norm_type must be one of `"weight_norm"`, 
`"time_group_norm"`), got {self.norm_type}''' ) super().__init__(**__a ) @property def snake_case_ ( self ): if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def snake_case_ ( self ): if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length ) ) @property def snake_case_ ( self ): __lowerCamelCase : str = np.prod(self.upsampling_ratios ) return math.ceil(self.sampling_rate / hop_length ) @property def snake_case_ ( self ): return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
594
0
'''simple docstring''' from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class __A : '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=1_3 , __lowerCAmelCase=3_0 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=3_2 , __lowerCAmelCase=2 , __lowerCAmelCase=4 , __lowerCAmelCase=3_7 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1_0 , __lowerCAmelCase=0.02 , __lowerCAmelCase=3 , __lowerCAmelCase=None , ): '''simple docstring''' lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = image_size lowerCamelCase__ = patch_size lowerCamelCase__ = num_channels lowerCamelCase__ = is_training lowerCamelCase__ = use_labels lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCamelCase__ = (image_size // patch_size) ** 2 lowerCamelCase__ = num_patches + 1 def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = 
floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = self.get_config() return config, pixel_values, labels def __lowerCamelCase ( self ): '''simple docstring''' return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = TFViTModel(config=__A ) lowerCamelCase__ = model(__A , training=__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. lowerCamelCase__ = self.image_size // 2 lowerCamelCase__ = pixel_values[:, :, :image_size, :image_size] lowerCamelCase__ = model(__A , interpolate_pos_encoding=__A , training=__A ) lowerCamelCase__ = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): '''simple docstring''' lowerCamelCase__ = self.type_sequence_label_size lowerCamelCase__ = TFViTForImageClassification(__A ) lowerCamelCase__ = model(__A , labels=__A , training=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. 
lowerCamelCase__ = self.image_size // 2 lowerCamelCase__ = pixel_values[:, :, :image_size, :image_size] lowerCamelCase__ = model(__A , interpolate_pos_encoding=__A , training=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase__ = 1 lowerCamelCase__ = TFViTForImageClassification(__A ) lowerCamelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase__ = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.prepare_config_and_inputs() lowerCamelCase__ = config_and_inputs lowerCamelCase__ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class __A ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () lowerCAmelCase_ = ( {"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification} if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFViTModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 ) def __lowerCamelCase ( self ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass @unittest.skip(reason='''ViT does not use inputs_embeds''' ) def __lowerCamelCase ( self ): '''simple docstring''' pass def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__A ) 
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , tf.keras.layers.Layer ) ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase__ = model_class(__A ) lowerCamelCase__ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase__ = [*signature.parameters.keys()] lowerCamelCase__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , __A ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' ) self.assertIsNotNone(__A ) def lowerCAmelCase__() -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __A ( unittest.TestCase ): '''simple docstring''' @cached_property def __lowerCamelCase ( self ): '''simple docstring''' return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None @slow def __lowerCamelCase ( self ): '''simple docstring''' lowerCamelCase__ = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ) lowerCamelCase__ = self.default_image_processor lowerCamelCase__ = prepare_img() lowerCamelCase__ = image_processor(images=__A , return_tensors='''tf''' ) # forward pass lowerCamelCase__ = model(**__A ) # verify 
the logits lowerCamelCase__ = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __A ) lowerCamelCase__ = tf.constant([-0.2744, 0.8215, -0.0836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __A , atol=1E-4 )
703
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _a = { "configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"], "convert_funnel_original_tf_checkpoint_to_pytorch": [], "tokenization_funnel": ["FunnelTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ["FunnelTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "FunnelPreTrainedModel", "load_tf_weights_in_funnel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", "TFFunnelPreTrainedModel", ] if TYPE_CHECKING: from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig from .tokenization_funnel import FunnelTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_funnel_fast import FunnelTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, 
FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, FunnelPreTrainedModel, load_tf_weights_in_funnel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, TFFunnelPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
29
0
'''simple docstring''' from collections.abc import Callable import numpy as np def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): '''simple docstring''' _snake_case = int(np.ceil((x_end - xa) / step_size ) ) _snake_case = np.zeros((n + 1,) ) _snake_case = ya _snake_case = xa for k in range(SCREAMING_SNAKE_CASE__ ): _snake_case = y[k] + step_size * ode_func(SCREAMING_SNAKE_CASE__ , y[k] ) _snake_case = y[k] + ( (step_size / 2) * (ode_func(SCREAMING_SNAKE_CASE__ , y[k] ) + ode_func(x + step_size , SCREAMING_SNAKE_CASE__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
672
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __magic_name__ : Dict = { """configuration_pix2struct""": [ """PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Pix2StructConfig""", """Pix2StructTextConfig""", """Pix2StructVisionConfig""", ], """processing_pix2struct""": ["""Pix2StructProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : List[str] = ["""Pix2StructImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : List[Any] = [ """PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Pix2StructPreTrainedModel""", """Pix2StructForConditionalGeneration""", """Pix2StructVisionModel""", """Pix2StructTextModel""", ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys __magic_name__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
672
1
import unittest from transformers import DonutProcessor UpperCAmelCase_ : str = 'naver-clova-ix/donut-base' class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: a_ : Optional[int] = DonutProcessor.from_pretrained(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: a_ : Optional[int] = { 'name': 'John Doe', 'age': '99', 'city': 'Atlanta', 'state': 'GA', 'zip': '30301', 'phone': '123-4567', 'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}], } a_ : Union[str, Any] = ( '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>' '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>' '<s_nicknames><s_nickname>Johnny</s_nickname>' '<sep/><s_nickname>JD</s_nickname></s_nicknames>' ) a_ : Dict = self.processor.tokenajson(SCREAMING_SNAKE_CASE__ ) self.assertDictEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
443
def SCREAMING_SNAKE_CASE_ ( __A : int , __A : int , __A : int ) -> float: """simple docstring""" a_ : Optional[int] = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def SCREAMING_SNAKE_CASE_ ( ) -> Optional[Any]: """simple docstring""" print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
443
1
import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class lowerCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self :Optional[Any] , lowerCamelCase__ :Dict ): UpperCamelCase__ :Union[str, Any] = parent def __a ( self :Any ): return {} def A ( ) -> Union[str, Any]: UpperCamelCase__ :Dict = """<HTML> <HEAD> <TITLE>sample document</TITLE> </HEAD> <BODY BGCOLOR=\"FFFFFF\"> <HR> <a href=\"http://google.com\">Goog</a> <H1>This is one header</H1> <H2>This is a another Header</H2> <P>Travel from <P> <B>SFO to JFK</B> <BR> <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B> <HR> <div style=\"color:#0000FF\"> <h3>Traveler <b> name </b> is <p> John Doe </p> </div>""" UpperCamelCase__ :Tuple = """ <!DOCTYPE html> <html> <body> <h1>My First Heading</h1> <p>My first paragraph.</p> </body> </html> """ return [html_string_a, html_string_a] @require_bsa class lowerCAmelCase_ ( lowercase , unittest.TestCase ): """simple docstring""" _snake_case : Optional[int] = MarkupLMFeatureExtractor if is_bsa_available() else None def __a ( self :List[str] ): UpperCamelCase__ :str = MarkupLMFeatureExtractionTester(self ) @property def __a ( self :str ): return self.feature_extract_tester.prepare_feat_extract_dict() def __a ( self :Union[str, Any] ): # Initialize feature_extractor UpperCamelCase__ :Optional[int] = self.feature_extraction_class() # Test not batched input UpperCamelCase__ :List[str] = get_html_strings()[0] UpperCamelCase__ :List[str] = feature_extractor(lowerCamelCase__ ) # fmt: off UpperCamelCase__ :List[Any] = [["""sample document""", """Goog""", """This is one header""", """This is a another Header""", """Travel from""", """SFO to JFK""", """on May 2, 2015 at 2:00 pm. 
For details go to confirm.com""", """Traveler""", """name""", """is""", """John Doe"""]] UpperCamelCase__ :List[Any] = [["""/html/head/title""", """/html/body/a""", """/html/body/h1""", """/html/body/h2""", """/html/body/p""", """/html/body/p/p/b[1]""", """/html/body/p/p/b[2]/i""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/b""", """/html/body/p/p/div/h3""", """/html/body/p/p/div/h3/p"""]] # fmt: on self.assertEqual(encoding.nodes , lowerCamelCase__ ) self.assertEqual(encoding.xpaths , lowerCamelCase__ ) # Test batched UpperCamelCase__ :int = get_html_strings() UpperCamelCase__ :str = feature_extractor(lowerCamelCase__ ) # fmt: off UpperCamelCase__ :Union[str, Any] = expected_nodes + [["""My First Heading""", """My first paragraph."""]] UpperCamelCase__ :List[Any] = expected_xpaths + [["""/html/body/h1""", """/html/body/p"""]] self.assertEqual(len(encoding.nodes ) , 2 ) self.assertEqual(len(encoding.xpaths ) , 2 ) self.assertEqual(encoding.nodes , lowerCamelCase__ ) self.assertEqual(encoding.xpaths , lowerCamelCase__ )
45
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy lowerCAmelCase = logging.get_logger(__name__) class lowerCamelCase ( _A ): def __init__( self , a_ , a_ , a_ , **a_ ): lowerCAmelCase : Tuple = feature_size lowerCAmelCase : int = sampling_rate lowerCAmelCase : str = padding_value lowerCAmelCase : int = kwargs.pop("padding_side" , "right" ) lowerCAmelCase : int = kwargs.pop("return_attention_mask" , a_ ) super().__init__(**a_ ) def _lowerCamelCase ( self , a_ , a_ = True , a_ = None , a_ = False , a_ = None , a_ = None , a_ = None , ): # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(a_ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): lowerCAmelCase : str = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`" F''' to this method that includes {self.model_input_names[0]}, but you provided''' F''' {list(processed_features.keys() )}''' ) lowerCAmelCase : Any = processed_features[self.model_input_names[0]] lowerCAmelCase : Dict = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(a_ ) == 0: if return_attention_mask: lowerCAmelCase : List[Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may 
be on for PyTorch lowerCAmelCase : Optional[int] = required_input[0] if isinstance(a_ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. lowerCAmelCase : Optional[Any] = 0 while len(required_input[index] ) == 0: index += 1 if index < len(a_ ): lowerCAmelCase : Union[str, Any] = required_input[index][0] if return_tensors is None: if is_tf_tensor(a_ ): lowerCAmelCase : Tuple = "tf" elif is_torch_tensor(a_ ): lowerCAmelCase : str = "pt" elif isinstance(a_ , (int, float, list, tuple, np.ndarray) ): lowerCAmelCase : Dict = "np" else: raise ValueError( F'''type of {first_element} unknown: {type(a_ )}. ''' "Should be one of a python, numpy, pytorch or tensorflow object." ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): lowerCAmelCase : Dict = to_numpy(a_ ) else: lowerCAmelCase : Any = [to_numpy(a_ ) for v in value] # Convert padding_strategy in PaddingStrategy lowerCAmelCase : Optional[int] = self._get_padding_strategies(padding=a_ , max_length=a_ ) lowerCAmelCase : Dict = processed_features[self.model_input_names[0]] lowerCAmelCase : Any = len(a_ ) if not all(len(a_ ) == batch_size for v in processed_features.values() ): raise ValueError("Some items in the output dictionary have a different batch size than others." 
) lowerCAmelCase : Tuple = [] for i in range(a_ ): lowerCAmelCase : Any = {k: v[i] for k, v in processed_features.items()} # truncation lowerCAmelCase : Tuple = self._truncate( a_ , max_length=a_ , pad_to_multiple_of=a_ , truncation=a_ , ) truncated_inputs.append(a_ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length lowerCAmelCase : Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) lowerCAmelCase : int = PaddingStrategy.MAX_LENGTH lowerCAmelCase : List[Any] = {} for i in range(a_ ): # padding lowerCAmelCase : Optional[int] = self._pad( truncated_inputs[i] , max_length=a_ , padding_strategy=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , ) for key, value in outputs.items(): if key not in batch_outputs: lowerCAmelCase : Dict = [] if value.dtype is np.dtype(np.floataa ): lowerCAmelCase : int = value.astype(np.floataa ) batch_outputs[key].append(a_ ) return BatchFeature(a_ , tensor_type=a_ ) def _lowerCamelCase ( self , a_ , a_ = None , a_ = PaddingStrategy.DO_NOT_PAD , a_ = None , a_ = None , ): lowerCAmelCase : Tuple = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: lowerCAmelCase : Dict = len(a_ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowerCAmelCase : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowerCAmelCase : List[Any] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(a_ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: lowerCAmelCase : Any = np.ones(len(a_ ) , dtype=np.intaa ) if needs_to_be_padded: lowerCAmelCase : Dict = max_length - len(a_ ) if self.padding_side == "right": if return_attention_mask: lowerCAmelCase : int = np.pad( processed_features["attention_mask"] , (0, difference) ) lowerCAmelCase : Optional[int] = ((0, difference), (0, 0)) 
if self.feature_size > 1 else (0, difference) lowerCAmelCase : Any = np.pad( a_ , a_ , "constant" , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: lowerCAmelCase : Dict = np.pad( processed_features["attention_mask"] , (difference, 0) ) lowerCAmelCase : Any = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) lowerCAmelCase : Any = np.pad( a_ , a_ , "constant" , constant_values=self.padding_value ) else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return processed_features def _lowerCamelCase ( self , a_ , a_ = None , a_ = None , a_ = None , ): if not truncation: return processed_features elif truncation and max_length is None: raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." ) lowerCAmelCase : int = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowerCAmelCase : Union[str, Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowerCAmelCase : Any = len(a_ ) > max_length if needs_to_be_truncated: lowerCAmelCase : Optional[int] = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: lowerCAmelCase : Any = processed_features["attention_mask"][:max_length] return processed_features def _lowerCamelCase ( self , a_=False , a_=None ): # Get padding strategy if padding is not False: if padding is True: lowerCAmelCase : str = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(a_ , a_ ): lowerCAmelCase : str = PaddingStrategy(a_ ) elif isinstance(a_ , a_ ): lowerCAmelCase : Optional[int] = padding else: lowerCAmelCase : Tuple = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( 
F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use" " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." ) return padding_strategy
525
0
"""simple docstring""" from random import randint, random def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : int = 5 , ) -> list: """simple docstring""" snake_case = [[-1] * number_of_cells] # Create a highway without any car snake_case = 0 snake_case = max(_UpperCamelCase , 0 ) while i < number_of_cells: snake_case = ( randint(0 , _UpperCamelCase ) if random_speed else initial_speed ) # Place the cars i += ( randint(1 , max_speed * 2 ) if random_frequency else frequency ) # Arbitrary number, may need tuning return highway def lowerCAmelCase__ ( _UpperCamelCase : list , _UpperCamelCase : int ) -> int: """simple docstring""" snake_case = 0 snake_case = highway_now[car_index + 1 :] for cell in range(len(_UpperCamelCase ) ): # May need a better name for this if cells[cell] != -1: # If the cell is not empty then return distance # we have the distance we wanted distance += 1 # Here if the car is near the end of the highway return distance + get_distance(_UpperCamelCase , -1 ) def lowerCAmelCase__ ( _UpperCamelCase : list , _UpperCamelCase : float , _UpperCamelCase : int ) -> list: """simple docstring""" snake_case = len(_UpperCamelCase ) # Beforce calculations, the highway is empty snake_case = [-1] * number_of_cells for car_index in range(_UpperCamelCase ): if highway_now[car_index] != -1: # Add 1 to the current speed of the car and cap the speed snake_case = min(highway_now[car_index] + 1 , _UpperCamelCase ) # Number of empty cell before the next car snake_case = get_distance(_UpperCamelCase , _UpperCamelCase ) - 1 # We can't have the car causing an accident snake_case = min(next_highway[car_index] , _UpperCamelCase ) if random() < probability: # Randomly, a driver will slow down snake_case = max(next_highway[car_index] - 1 , 0 ) return next_highway def lowerCAmelCase__ ( _UpperCamelCase : list , _UpperCamelCase : int , _UpperCamelCase : 
float , _UpperCamelCase : int ) -> list: """simple docstring""" snake_case = len(highway[0] ) for i in range(_UpperCamelCase ): snake_case = update(highway[i] , _UpperCamelCase , _UpperCamelCase ) snake_case = [-1] * number_of_cells for car_index in range(_UpperCamelCase ): snake_case = next_speeds_calculated[car_index] if speed != -1: # Change the position based on the speed (with % to create the loop) snake_case = (car_index + speed) % number_of_cells # Commit the change of position snake_case = speed highway.append(_UpperCamelCase ) return highway if __name__ == "__main__": import doctest doctest.testmod()
104
"""simple docstring""" from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowerCAmelCase__ ( _UpperCamelCase : Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]: """simple docstring""" snake_case = [] snake_case = [] snake_case = [] for rt in rc.restypes: snake_case = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) snake_case = {name: i for i, name in enumerate(_UpperCamelCase )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 1_4 ) restype_atomaa_to_atomaa_list.append([0] * 3_7 ) restype_atomaa_mask_list.append([0.0] * 1_4 ) snake_case = torch.tensor( _UpperCamelCase , dtype=torch.intaa , device=protein['aatype'].device , ) snake_case = torch.tensor( _UpperCamelCase , dtype=torch.intaa , device=protein['aatype'].device , ) snake_case = torch.tensor( _UpperCamelCase , dtype=torch.floataa , device=protein['aatype'].device , ) snake_case = protein['aatype'].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein snake_case = restype_atomaa_to_atomaa[protein_aatype] snake_case = restype_atomaa_mask[protein_aatype] snake_case = residx_atomaa_mask snake_case = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back snake_case = restype_atomaa_to_atomaa[protein_aatype] snake_case = residx_atomaa_to_atomaa.long() # create the corresponding mask snake_case = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein['aatype'].device ) for restype, restype_letter in enumerate(rc.restypes ): snake_case = rc.restype_atoa[restype_letter] snake_case = rc.residue_atoms[restype_name] for atom_name in atom_names: snake_case = rc.atom_order[atom_name] snake_case = 1 snake_case = restype_atomaa_mask[protein_aatype] snake_case = residx_atomaa_mask return protein def lowerCAmelCase__ ( _UpperCamelCase : Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]: """simple docstring""" snake_case = tree_map(lambda _UpperCamelCase : torch.tensor(_UpperCamelCase , device=batch['aatype'].device ) , _UpperCamelCase , np.ndarray ) snake_case = tensor_tree_map(lambda _UpperCamelCase : np.array(_UpperCamelCase ) , make_atomaa_masks(_UpperCamelCase ) ) return out
104
1
'''simple docstring''' from abc import ABC, abstractmethod from typing import Optional, Union from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit from ..utils.typing import NestedDataStructureLike, PathLike class __snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): snake_case__ : str = path_or_paths snake_case__ : Any = split if split or isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else """train""" snake_case__ : int = features snake_case__ : Optional[Any] = cache_dir snake_case__ : List[str] = keep_in_memory snake_case__ : Optional[Any] = streaming snake_case__ : Optional[int] = num_proc snake_case__ : List[str] = kwargs @abstractmethod def __UpperCamelCase ( self ): pass class __snake_case ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = False , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , ): snake_case__ : str = features snake_case__ : Tuple = cache_dir snake_case__ : List[Any] = keep_in_memory snake_case__ : Optional[int] = streaming snake_case__ : str = num_proc snake_case__ : Tuple = kwargs @abstractmethod def __UpperCamelCase ( self ): pass
38
import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowerCamelCase_ = logging.getLogger(__name__) lowerCamelCase_ = 5_0 # max width of layer names lowerCamelCase_ = 7_0 # max width of quantizer names def lowerCamelCase ( a_ ) -> Tuple: lowerCAmelCase_ = parser.add_argument_group('quant_trainer arguments' ) group.add_argument('--wprec' , type=a_ , default=8 , help='weight precision' ) group.add_argument('--aprec' , type=a_ , default=8 , help='activation precision' ) group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' ) group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' ) group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' ) group.add_argument('--quant-disable-keyword' , type=a_ , nargs='+' , help='disable quantizers by keyword' ) group.add_argument('--quant-disable-layer-module' , type=a_ , help='disable quantizers by keyword under layer.' ) group.add_argument('--quant-enable-layer-module' , type=a_ , help='enable quantizers by keyword under layer' ) group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' ) group.add_argument('--percentile' , default=a_ , type=a_ , help='percentile for PercentileCalibrator' ) group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' ) group.add_argument('--clip-gelu' , metavar='N' , type=a_ , help='clip gelu output maximum value to N' ) group.add_argument( '--recalibrate-weights' , action='store_true' , help=( 'recalibrate weight amaxes by taking the max of the weights.' ' amaxes will be computed with the current quantization granularity (axis).' 
) , ) def lowerCamelCase ( a_ ) -> Optional[Any]: if args.calibrator == "max": lowerCAmelCase_ = 'max' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('Specify --percentile when using percentile calibrator' ) lowerCAmelCase_ = 'histogram' elif args.calibrator == "mse": lowerCAmelCase_ = 'histogram' else: raise ValueError(F'''Invalid calibrator {args.calibrator}''' ) lowerCAmelCase_ = QuantDescriptor(num_bits=args.aprec , calib_method=a_ ) lowerCAmelCase_ = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(a_ ) quant_nn.QuantLinear.set_default_quant_desc_weight(a_ ) def lowerCamelCase ( a_ , a_ , a_=False , a_=False ) -> Any: logger.info('Configuring Model for Quantization' ) logger.info(F'''using quantization package {pytorch_quantization.__file__}''' ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(a_ , ['embeddings'] , which='weight' , _disabled=a_ ) if args.quant_disable: set_quantizer_by_name(a_ , [''] , _disabled=a_ ) if args.quant_disable_keyword: set_quantizer_by_name(a_ , args.quant_disable_keyword , _disabled=a_ ) if args.quant_disable_layer_module: set_quantizer_by_name(a_ , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=a_ ) if args.quant_enable_layer_module: set_quantizer_by_name(a_ , [R'layer.\d+.' 
+ args.quant_enable_layer_module] , _disabled=a_ ) if args.recalibrate_weights: recalibrate_weights(a_ ) if args.fuse_qkv: fuse_qkv(a_ , a_ ) if args.clip_gelu: clip_gelu(a_ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(a_ ) def lowerCamelCase ( a_ ) -> Dict: logger.info('Enabling Calibration' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F'''{name:80}: {module}''' ) def lowerCamelCase ( a_ , a_ ) -> Optional[Any]: logger.info('Loading calibrated amax' ) for name, module in model.named_modules(): if name.endswith('_quantizer' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('percentile' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(a_ ) def lowerCamelCase ( a_ , a_ ) -> Any: def fusea(a_ , a_ , a_ ): for mod in [qq, qk, qv]: if not hasattr(a_ , '_amax' ): print(' WARNING: NO AMAX BUFFER' ) return lowerCAmelCase_ = qq._amax.detach().item() lowerCAmelCase_ = qk._amax.detach().item() lowerCAmelCase_ = qv._amax.detach().item() lowerCAmelCase_ = max(a_ , a_ , a_ ) qq._amax.fill_(a_ ) qk._amax.fill_(a_ ) qv._amax.fill_(a_ ) logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' ) for name, mod in model.named_modules(): if name.endswith('.attention.self' ): logger.info(F'''FUSE_QKV: {name:{name_width}}''' ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def lowerCamelCase ( a_ , a_ ) -> Dict: for name, mod in model.named_modules(): if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ): 
lowerCAmelCase_ = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=a_ ) lowerCAmelCase_ = mod._input_quantizer._amax.data.detach().item() logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' ) def lowerCamelCase ( a_ ) -> Optional[Any]: for name, mod in model.named_modules(): if hasattr(a_ , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None: lowerCAmelCase_ = mod.weight.shape[0] lowerCAmelCase_ = mod._weight_quantizer._amax.detach() lowerCAmelCase_ = torch.ones(a_ , dtype=amax.dtype , device=amax.device ) * amax print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' ) def lowerCamelCase ( a_ ) -> Optional[Any]: for name, mod in model.named_modules(): if hasattr(a_ , '_weight_quantizer' ): if not hasattr(mod.weight_quantizer , '_amax' ): print('RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) lowerCAmelCase_ = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) lowerCAmelCase_ = set(range(len(mod.weight.size() ) ) ) - axis_set lowerCAmelCase_ = pytorch_quantization.utils.reduce_amax(mod.weight , axis=a_ , keepdims=a_ ).detach() logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' ) lowerCAmelCase_ = amax def lowerCamelCase ( a_ , a_=25 , a_=180 , a_=None ) -> str: if ignore is None: lowerCAmelCase_ = [] elif not isinstance(a_ , a_ ): lowerCAmelCase_ = [ignore] lowerCAmelCase_ = 0 for name, mod in model.named_modules(): if not hasattr(a_ , 'weight' ): continue lowerCAmelCase_ = max(a_ , len(a_ ) ) for name, mod in model.named_modules(): lowerCAmelCase_ = getattr(a_ , '_input_quantizer' , a_ ) lowerCAmelCase_ = getattr(a_ , '_weight_quantizer' , a_ ) if not hasattr(a_ , 'weight' ): continue if type(a_ ) in ignore: continue if [True for s in ignore if 
type(a_ ) is str and s in name]: continue lowerCAmelCase_ = F'''Act:{input_q.extra_repr()}''' lowerCAmelCase_ = F'''Wgt:{weight_q.extra_repr()}''' lowerCAmelCase_ = F'''{name:{name_width}} {act_str} {wgt_str}''' if len(a_ ) <= line_width: logger.info(a_ ) else: logger.info(F'''{name:{name_width}} {act_str}''' ) logger.info(F'''{" ":{name_width}} {wgt_str}''' ) def lowerCamelCase ( a_ ) -> Optional[int]: lowerCAmelCase_ = 0 for name, mod in model.named_modules(): if isinstance(a_ , pytorch_quantization.nn.TensorQuantizer ): print(F'''{name:80} {mod}''' ) count += 1 print(F'''{count} TensorQuantizers found in model''' ) def lowerCamelCase ( a_ , a_ , a_ , a_ , a_ ) -> Union[str, Any]: lowerCAmelCase_ = getattr(a_ , a_ , a_ ) if quantizer_mod is not None: assert hasattr(a_ , a_ ) setattr(a_ , a_ , a_ ) else: logger.warning(F'''{name} has no {quantizer}''' ) def lowerCamelCase ( a_ , a_ , a_="both" , **a_ ) -> Optional[int]: lowerCAmelCase_ = F'''Warning: changing {which} quantizers of {name:{qname_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' if which in ["input", "both"]: set_quantizer(a_ , a_ , '_input_quantizer' , a_ , a_ ) if which in ["weight", "both"]: set_quantizer(a_ , a_ , '_weight_quantizer' , a_ , a_ ) logger.info(a_ ) def lowerCamelCase ( a_ , a_ , **a_ ) -> Dict: for name, mod in model.named_modules(): if hasattr(a_ , '_input_quantizer' ) or hasattr(a_ , '_weight_quantizer' ): for n in names: if re.search(a_ , a_ ): set_quantizers(a_ , a_ , **a_ ) elif name.endswith('_quantizer' ): for n in names: if re.search(a_ , a_ ): lowerCAmelCase_ = F'''Warning: changing {name:{name_width}}''' for k, v in kwargs.items(): s += F''' {k}={v}''' setattr(a_ , a_ , a_ ) logger.info(a_ )
318
0
import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel __snake_case : Any =False __snake_case : Tuple =True __snake_case : Optional[int] =False if __name__ == "__main__": __snake_case : Tuple =argparse.ArgumentParser() parser.add_argument( '--repo_path', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') __snake_case : Tuple =parser.parse_args() __snake_case : List[str] ={ 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } __snake_case : int ={ 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } __snake_case : str ='' if has_file(args.repo_path, 'config.json') else 'unet' with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader: __snake_case : Tuple =reader.read() __snake_case : Dict =json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, 'config.json'): __snake_case : Optional[Any] =UNetaDModel(**config) else: __snake_case : Union[str, Any] =UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel __snake_case : Any =class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) __snake_case : List[str] =dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: __snake_case : int =config[key] del config[key] __snake_case 
: int =[k.replace('UNetRes', '') for k in config['down_block_types']] __snake_case : Tuple =[k.replace('UNetRes', '') for k in config['up_block_types']] if do_only_weights: __snake_case : Tuple =torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin')) __snake_case : int ={} for param_key, param_value in state_dict.items(): if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'): continue __snake_case : Any =False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split('.')[0] == key: __snake_case : Tuple =param_value __snake_case : str =True if not has_changed: __snake_case : int =param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
90
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCamelCase__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase): '''simple docstring''' snake_case_ =AltDiffusionPipeline snake_case_ =TEXT_TO_IMAGE_PARAMS snake_case_ =TEXT_TO_IMAGE_BATCH_PARAMS snake_case_ =TEXT_TO_IMAGE_IMAGE_PARAMS snake_case_ =TEXT_TO_IMAGE_IMAGE_PARAMS def lowerCAmelCase__ (self ) -> Dict: """simple docstring""" torch.manual_seed(0 ) lowerCAmelCase__ : int = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,) lowerCAmelCase__ : List[str] = DDIMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=__lowerCamelCase ,set_alpha_to_one=__lowerCamelCase ,) torch.manual_seed(0 ) lowerCAmelCase__ : Optional[Any] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,) # TODO: address the non-deterministic text encoder (fails for save-load 
tests) # torch.manual_seed(0) # text_encoder_config = RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) lowerCAmelCase__ : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,projection_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=50_02 ,) lowerCAmelCase__ : int = CLIPTextModel(__lowerCamelCase ) lowerCAmelCase__ : str = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) lowerCAmelCase__ : Union[str, Any] = 77 lowerCAmelCase__ : str = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=0 ) -> Optional[Any]: """simple docstring""" if str(__lowerCamelCase ).startswith('''mps''' ): lowerCAmelCase__ : Tuple = torch.manual_seed(__lowerCamelCase ) else: lowerCAmelCase__ : Any = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', } return inputs def lowerCAmelCase__ (self ) -> str: """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def lowerCAmelCase__ (self ) -> Tuple: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def lowerCAmelCase__ (self ) -> Tuple: """simple docstring""" lowerCAmelCase__ : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : 
int = self.get_dummy_components() torch.manual_seed(0 ) lowerCAmelCase__ : str = RobertaSeriesConfig( hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=50_02 ,) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ : Any = RobertaSeriesModelWithTransformation(__lowerCamelCase ) lowerCAmelCase__ : List[str] = text_encoder lowerCAmelCase__ : List[Any] = AltDiffusionPipeline(**__lowerCamelCase ) lowerCAmelCase__ : Optional[Any] = alt_pipe.to(__lowerCamelCase ) alt_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCAmelCase__ : List[str] = self.get_dummy_inputs(__lowerCamelCase ) lowerCAmelCase__ : str = '''A photo of an astronaut''' lowerCAmelCase__ : Any = alt_pipe(**__lowerCamelCase ) lowerCAmelCase__ : int = output.images lowerCAmelCase__ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : Optional[Any] = np.array( [0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowerCAmelCase__ : str = self.get_dummy_components() lowerCAmelCase__ : Optional[Any] = PNDMScheduler(skip_prk_steps=__lowerCamelCase ) torch.manual_seed(0 ) lowerCAmelCase__ : int = RobertaSeriesConfig( hidden_size=32 ,project_dim=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=50_02 ,) # TODO: remove after fixing the non-deterministic text encoder lowerCAmelCase__ : Tuple = RobertaSeriesModelWithTransformation(__lowerCamelCase ) lowerCAmelCase__ : List[str] = text_encoder lowerCAmelCase__ : List[Any] = AltDiffusionPipeline(**__lowerCamelCase ) lowerCAmelCase__ : str = 
alt_pipe.to(__lowerCamelCase ) alt_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase ) lowerCAmelCase__ : Optional[int] = alt_pipe(**__lowerCamelCase ) lowerCAmelCase__ : Optional[int] = output.images lowerCAmelCase__ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowerCAmelCase__ : Union[str, Any] = np.array( [0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' ,safety_checker=__lowerCamelCase ) lowerCAmelCase__ : List[Any] = alt_pipe.to(__lowerCamelCase ) alt_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCAmelCase__ : Any = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ : List[Any] = torch.manual_seed(0 ) lowerCAmelCase__ : str = alt_pipe([prompt] ,generator=__lowerCamelCase ,guidance_scale=6.0 ,num_inference_steps=20 ,output_type='''np''' ) lowerCAmelCase__ : str = output.images lowerCAmelCase__ : int = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ : Dict = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase__ (self ) -> Any: """simple docstring""" lowerCAmelCase__ : List[Any] = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' ,subfolder='''scheduler''' ) lowerCAmelCase__ : List[str] = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' ,scheduler=__lowerCamelCase 
,safety_checker=__lowerCamelCase ) lowerCAmelCase__ : Union[str, Any] = alt_pipe.to(__lowerCamelCase ) alt_pipe.set_progress_bar_config(disable=__lowerCamelCase ) lowerCAmelCase__ : List[Any] = '''A painting of a squirrel eating a burger''' lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 ) lowerCAmelCase__ : List[str] = alt_pipe([prompt] ,generator=__lowerCamelCase ,num_inference_steps=2 ,output_type='''numpy''' ) lowerCAmelCase__ : List[str] = output.images lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1] assert image.shape == (1, 5_12, 5_12, 3) lowerCAmelCase__ : List[Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
90
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __magic_name__ : Optional[Any] = { """configuration_squeezebert""": [ """SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SqueezeBertConfig""", """SqueezeBertOnnxConfig""", ], """tokenization_squeezebert""": ["""SqueezeBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : List[str] = ["""SqueezeBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : List[str] = [ """SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """SqueezeBertForMaskedLM""", """SqueezeBertForMultipleChoice""", """SqueezeBertForQuestionAnswering""", """SqueezeBertForSequenceClassification""", """SqueezeBertForTokenClassification""", """SqueezeBertModel""", """SqueezeBertModule""", """SqueezeBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys __magic_name__ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, 
module_spec=__spec__)
615
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __magic_name__ : Optional[int] = { """configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""], """tokenization_biogpt""": ["""BioGptTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : int = [ """BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BioGptForCausalLM""", """BioGptForTokenClassification""", """BioGptForSequenceClassification""", """BioGptModel""", """BioGptPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig from .tokenization_biogpt import BioGptTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_biogpt import ( BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptPreTrainedModel, ) else: import sys __magic_name__ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
615
1
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class _a ( unittest.TestCase ): """simple docstring""" def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=7 , lowerCAmelCase_=3 , lowerCAmelCase_=30 , lowerCAmelCase_=400 , lowerCAmelCase_=True , lowerCAmelCase_=None , lowerCAmelCase_=0.9 , lowerCAmelCase_=None , lowerCAmelCase_=True , lowerCAmelCase_=[0.5, 0.5, 0.5] , lowerCAmelCase_=[0.5, 0.5, 0.5] , ): _lowercase =size if size is not None else {"shortest_edge": 30} _lowercase =crop_size if crop_size is not None else {"height": 30, "width": 30} _lowercase =parent _lowercase =batch_size _lowercase =num_channels _lowercase =min_resolution _lowercase =max_resolution _lowercase =do_resize_and_center_crop _lowercase =size _lowercase =crop_pct _lowercase =crop_size _lowercase =do_normalize _lowercase =image_mean _lowercase =image_std def __lowerCAmelCase ( self ): return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _a ( lowerCamelCase_ , unittest.TestCase ): """simple docstring""" __SCREAMING_SNAKE_CASE = PoolFormerImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ): _lowercase =PoolFormerImageProcessingTester(self ) @property def __lowerCAmelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ): _lowercase =self.image_processing_class(**self.image_processor_dict ) 
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_resize_and_center_crop" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "size" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "crop_pct" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_normalize" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_mean" ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_std" ) ) def __lowerCAmelCase ( self ): _lowercase =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 30} ) self.assertEqual(image_processor.crop_size , {"height": 30, "width": 30} ) _lowercase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"shortest_edge": 42} ) self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} ) def __lowerCAmelCase ( self ): pass def __lowerCAmelCase ( self ): _lowercase =self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowercase =prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input _lowercase =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _lowercase =image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __lowerCAmelCase ( self ): _lowercase =self.image_processing_class(**self.image_processor_dict ) # create random numpy 
tensors _lowercase =prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input _lowercase =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _lowercase =image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) def __lowerCAmelCase ( self ): _lowercase =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowercase =prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input _lowercase =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , ) # Test batched _lowercase =image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ) , )
713
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _a ( unittest.TestCase ): """simple docstring""" def __lowerCAmelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCAmelCase ( self ): _lowercase , _lowercase =FlaxStableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , ) _lowercase ="A painting of a squirrel eating a burger" _lowercase =jax.device_count() _lowercase =num_samples * [prompt] _lowercase =sd_pipe.prepare_inputs(lowerCAmelCase_ ) _lowercase =replicate(lowerCAmelCase_ ) _lowercase =shard(lowerCAmelCase_ ) _lowercase =jax.random.PRNGKey(0 ) _lowercase =jax.random.split(lowerCAmelCase_ , jax.device_count() ) _lowercase =sd_pipe(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , num_inference_steps=25 , jit=lowerCAmelCase_ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _lowercase =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _lowercase =images[0, 253:256, 253:256, -1] _lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) ) _lowercase =jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self ): _lowercase ="stabilityai/stable-diffusion-2" _lowercase , _lowercase =FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase_ , subfolder="scheduler" ) _lowercase , _lowercase =FlaxStableDiffusionPipeline.from_pretrained( lowerCAmelCase_ , scheduler=lowerCAmelCase_ , revision="bf16" , 
dtype=jnp.bfloataa , ) _lowercase =scheduler_params _lowercase ="A painting of a squirrel eating a burger" _lowercase =jax.device_count() _lowercase =num_samples * [prompt] _lowercase =sd_pipe.prepare_inputs(lowerCAmelCase_ ) _lowercase =replicate(lowerCAmelCase_ ) _lowercase =shard(lowerCAmelCase_ ) _lowercase =jax.random.PRNGKey(0 ) _lowercase =jax.random.split(lowerCAmelCase_ , jax.device_count() ) _lowercase =sd_pipe(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , num_inference_steps=25 , jit=lowerCAmelCase_ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) _lowercase =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) _lowercase =images[0, 253:256, 253:256, -1] _lowercase =jnp.asarray(jax.device_get(image_slice.flatten() ) ) _lowercase =jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
594
0
"""simple docstring""" from numpy import exp, pi, sqrt def _lowerCamelCase ( __a, __a = 0.0, __a = 1.0 ): return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
626
from __future__ import annotations from collections import namedtuple def snake_case_ ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ): __lowercase : str = namedtuple("""result""" , """name value""" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("""Only one argument must be 0""" ) elif power < 0: raise ValueError( """Power cannot be negative in any electrical/electronics system""" ) elif voltage == 0: return result("""voltage""" , power / current ) elif current == 0: return result("""current""" , power / voltage ) elif power == 0: return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
149
0
import re from filelock import FileLock try: import nltk _UpperCamelCase: str =True except (ImportError, ModuleNotFoundError): _UpperCamelCase: Any =False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def _a ( __SCREAMING_SNAKE_CASE : str ): """simple docstring""" re.sub('<n>' , '' , __SCREAMING_SNAKE_CASE ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(__SCREAMING_SNAKE_CASE ) )
585
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class __lowercase( unittest.TestCase ): """simple docstring""" def __init__( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int]=7 , _lowerCAmelCase : Dict=3 , _lowerCAmelCase : str=30 , _lowerCAmelCase : Tuple=400 , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : int=None , _lowerCAmelCase : Any=True , _lowerCAmelCase : Dict=1 / 255 , _lowerCAmelCase : str=True , _lowerCAmelCase : Tuple=[0.5, 0.5, 0.5] , _lowerCAmelCase : Any=[0.5, 0.5, 0.5] , _lowerCAmelCase : Optional[Any]=True , ) -> int: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p _lowerCAmelCase = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333} _lowerCAmelCase = parent _lowerCAmelCase = batch_size _lowerCAmelCase = num_channels _lowerCAmelCase = min_resolution _lowerCAmelCase = max_resolution _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean _lowerCAmelCase = image_std _lowerCAmelCase = do_pad def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[Any]: return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=False ) -> Union[str, Any]: if not batched: _lowerCAmelCase = 
image_inputs[0] if isinstance(_lowerCAmelCase , Image.Image ): _lowerCAmelCase , _lowerCAmelCase = image.size else: _lowerCAmelCase , _lowerCAmelCase = image.shape[1], image.shape[2] if w < h: _lowerCAmelCase = int(self.size['shortest_edge'] * h / w ) _lowerCAmelCase = self.size['shortest_edge'] elif w > h: _lowerCAmelCase = self.size['shortest_edge'] _lowerCAmelCase = int(self.size['shortest_edge'] * w / h ) else: _lowerCAmelCase = self.size['shortest_edge'] _lowerCAmelCase = self.size['shortest_edge'] else: _lowerCAmelCase = [] for image in image_inputs: _lowerCAmelCase , _lowerCAmelCase = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) _lowerCAmelCase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[0] )[0] _lowerCAmelCase = max(_lowerCAmelCase , key=lambda _lowerCAmelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class __lowercase( SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" UpperCamelCase_ = DetrImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]: _lowerCAmelCase = DetrImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[str]: return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[Any]: _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCAmelCase , 'image_mean' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'image_std' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'do_normalize' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'do_rescale' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'rescale_factor' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'size' ) ) self.assertTrue(hasattr(_lowerCAmelCase , 'do_pad' ) ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> 
Tuple: _lowerCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} ) self.assertEqual(image_processor.do_pad , _lowerCAmelCase ) _lowerCAmelCase = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_lowerCAmelCase ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _lowerCAmelCase ) def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> Tuple: pass def SCREAMING_SNAKE_CASE_ ( self : Any ) -> List[str]: # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , Image.Image ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase ) _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[Any]: # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , 
equal_resolution=_lowerCAmelCase , numpify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , np.ndarray ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any: # Initialize image_processing _lowerCAmelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCAmelCase , torchify=_lowerCAmelCase ) for image in image_inputs: self.assertIsInstance(_lowerCAmelCase , torch.Tensor ) # Test not batched input _lowerCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched _lowerCAmelCase = image_processing(_lowerCAmelCase , return_tensors='pt' ).pixel_values _lowerCAmelCase , _lowerCAmelCase = self.image_processor_tester.get_expected_values(_lowerCAmelCase , batched=_lowerCAmelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, 
expected_width, ) , ) @slow def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Any: # prepare image and target _lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: _lowerCAmelCase = json.loads(f.read() ) _lowerCAmelCase = {'image_id': 3_9769, 'annotations': target} # encode them _lowerCAmelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' ) _lowerCAmelCase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , return_tensors='pt' ) # verify pixel values _lowerCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowerCAmelCase , atol=1e-4 ) ) # verify area _lowerCAmelCase = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowerCAmelCase ) ) # verify boxes _lowerCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowerCAmelCase , atol=1e-3 ) ) # verify image_id _lowerCAmelCase = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowerCAmelCase ) ) # verify is_crowd _lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowerCAmelCase ) ) # verify class_labels _lowerCAmelCase = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowerCAmelCase ) ) # verify orig_size _lowerCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , 
_lowerCAmelCase ) ) # verify size _lowerCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowerCAmelCase ) ) @slow def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> List[Any]: # prepare image, target and masks_path _lowerCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: _lowerCAmelCase = json.loads(f.read() ) _lowerCAmelCase = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target} _lowerCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them _lowerCAmelCase = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' ) _lowerCAmelCase = image_processing(images=_lowerCAmelCase , annotations=_lowerCAmelCase , masks_path=_lowerCAmelCase , return_tensors='pt' ) # verify pixel values _lowerCAmelCase = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['pixel_values'].shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowerCAmelCase , atol=1e-4 ) ) # verify area _lowerCAmelCase = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowerCAmelCase ) ) # verify boxes _lowerCAmelCase = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowerCAmelCase ) _lowerCAmelCase = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowerCAmelCase , atol=1e-3 ) ) # verify image_id _lowerCAmelCase = torch.tensor([3_9769] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowerCAmelCase ) ) # verify is_crowd _lowerCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , 
_lowerCAmelCase ) ) # verify class_labels _lowerCAmelCase = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowerCAmelCase ) ) # verify masks _lowerCAmelCase = 82_2873 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _lowerCAmelCase ) # verify orig_size _lowerCAmelCase = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowerCAmelCase ) ) # verify size _lowerCAmelCase = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowerCAmelCase ) )
585
1
import inspect import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed import accelerate from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, require_cuda, require_fsdp, require_multi_gpu, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.other import patch_environment set_seed(42) UpperCamelCase = "bert-base-cased" UpperCamelCase = "fp16" UpperCamelCase = "bf16" UpperCamelCase = [FPaa, BFaa] @require_fsdp @require_cuda class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __a ( self :List[str] ): super().setUp() UpperCamelCase__ :str = dict( ACCELERATE_USE_FSDP="""true""" , MASTER_ADDR="""localhost""" , MASTER_PORT="""10999""" , RANK="""0""" , LOCAL_RANK="""0""" , WORLD_SIZE="""1""" , ) def __a ( self :Union[str, Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy for i, strategy in enumerate(lowerCamelCase__ ): UpperCamelCase__ :Optional[int] = self.dist_env.copy() UpperCamelCase__ :List[Any] = f"""{i + 1}""" UpperCamelCase__ :List[Any] = strategy with mockenv_context(**lowerCamelCase__ ): UpperCamelCase__ :Tuple = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) ) def __a ( self :Union[str, Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(lowerCamelCase__ ): UpperCamelCase__ :Optional[int] = self.dist_env.copy() UpperCamelCase__ :Optional[int] = prefetch_policy with mockenv_context(**lowerCamelCase__ ): UpperCamelCase__ :Dict = 
FullyShardedDataParallelPlugin() if prefetch_policy == "NO_PREFETCH": self.assertIsNone(fsdp_plugin.backward_prefetch ) else: self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) ) def __a ( self :Optional[Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(lowerCamelCase__ ): UpperCamelCase__ :Optional[int] = self.dist_env.copy() UpperCamelCase__ :Tuple = state_dict_type with mockenv_context(**lowerCamelCase__ ): UpperCamelCase__ :List[str] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) ) if state_dict_type == "FULL_STATE_DICT": self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu ) self.assertTrue(fsdp_plugin.state_dict_config.ranka_only ) def __a ( self :List[str] ): UpperCamelCase__ :List[Any] = AutoModel.from_pretrained(lowerCamelCase__ ) for policy in FSDP_AUTO_WRAP_POLICY: UpperCamelCase__ :Optional[int] = self.dist_env.copy() UpperCamelCase__ :int = policy if policy == "TRANSFORMER_BASED_WRAP": UpperCamelCase__ :Optional[Any] = """BertLayer""" elif policy == "SIZE_BASED_WRAP": UpperCamelCase__ :Union[str, Any] = """2000""" with mockenv_context(**lowerCamelCase__ ): UpperCamelCase__ :int = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ ) if policy == "NO_WRAP": self.assertIsNone(fsdp_plugin.auto_wrap_policy ) else: self.assertIsNotNone(fsdp_plugin.auto_wrap_policy ) UpperCamelCase__ :Optional[int] = self.dist_env.copy() UpperCamelCase__ :str = """TRANSFORMER_BASED_WRAP""" UpperCamelCase__ :Union[str, Any] = """T5Layer""" with mockenv_context(**lowerCamelCase__ ): UpperCamelCase__ :Any = FullyShardedDataParallelPlugin() with self.assertRaises(lowerCamelCase__ ) as cm: fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ ) self.assertTrue("""Could not find the transformer layer class to wrap in the model.""" in str(cm.exception ) ) UpperCamelCase__ :Dict = 
self.dist_env.copy() UpperCamelCase__ :int = """SIZE_BASED_WRAP""" UpperCamelCase__ :Union[str, Any] = """0""" with mockenv_context(**lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(lowerCamelCase__ ) self.assertIsNone(fsdp_plugin.auto_wrap_policy ) def __a ( self :Optional[Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: UpperCamelCase__ :Dict = self.dist_env.copy() UpperCamelCase__ :Dict = mp_dtype with mockenv_context(**lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = Accelerator() if mp_dtype == "fp16": UpperCamelCase__ :Tuple = torch.floataa elif mp_dtype == "bf16": UpperCamelCase__ :Tuple = torch.bfloataa UpperCamelCase__ :int = MixedPrecision(param_dtype=lowerCamelCase__ , reduce_dtype=lowerCamelCase__ , buffer_dtype=lowerCamelCase__ ) self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowerCamelCase__ ) if mp_dtype == FPaa: self.assertTrue(isinstance(accelerator.scaler , lowerCamelCase__ ) ) elif mp_dtype == BFaa: self.assertIsNone(accelerator.scaler ) AcceleratorState._reset_state(lowerCamelCase__ ) def __a ( self :Optional[Any] ): from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: UpperCamelCase__ :List[str] = self.dist_env.copy() UpperCamelCase__ :Dict = str(lowerCamelCase__ ).lower() with mockenv_context(**lowerCamelCase__ ): UpperCamelCase__ :List[str] = FullyShardedDataParallelPlugin() self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowerCamelCase__ ) ) @require_fsdp @require_multi_gpu @slow class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __a ( self :Dict ): super().setUp() UpperCamelCase__ :str = 0.82 UpperCamelCase__ :int = [ """fsdp_shard_grad_op_transformer_based_wrap""", """fsdp_full_shard_transformer_based_wrap""", ] 
UpperCamelCase__ :int = { """multi_gpu_fp16""": 32_00, """fsdp_shard_grad_op_transformer_based_wrap_fp16""": 20_00, """fsdp_full_shard_transformer_based_wrap_fp16""": 19_00, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } UpperCamelCase__ :Optional[Any] = 1_60 UpperCamelCase__ :List[str] = 1_60 UpperCamelCase__ :Union[str, Any] = inspect.getfile(accelerate.test_utils ) UpperCamelCase__ :Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """external_deps"""] ) def __a ( self :str ): UpperCamelCase__ :int = os.path.join(self.test_scripts_folder , """test_performance.py""" ) UpperCamelCase__ :List[str] = ["""accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp"""] for config in self.performance_configs: UpperCamelCase__ :Optional[Any] = cmd.copy() for i, strategy in enumerate(lowerCamelCase__ ): if strategy.lower() in config: cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" ) break if "fp32" in config: cmd_config.append("""--mixed_precision=no""" ) else: cmd_config.append("""--mixed_precision=fp16""" ) if "cpu_offload" in config: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f"""--output_dir={self.tmpdir}""", f"""--performance_lower_bound={self.performance_lower_bound}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() ) def __a ( self :str ): UpperCamelCase__ 
:List[Any] = os.path.join(self.test_scripts_folder , """test_checkpointing.py""" ) UpperCamelCase__ :Any = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", """--use_fsdp""", """--mixed_precision=fp16""", """--fsdp_transformer_layer_cls_to_wrap=BertLayer""", ] for i, strategy in enumerate(lowerCamelCase__ ): UpperCamelCase__ :Optional[Any] = cmd.copy() cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" ) if strategy != "FULL_SHARD": continue UpperCamelCase__ :Optional[int] = len(lowerCamelCase__ ) for state_dict_type in FSDP_STATE_DICT_TYPE: UpperCamelCase__ :Tuple = cmd_config[:state_dict_config_index] cmd_config.append(f"""--fsdp_state_dict_type={state_dict_type}""" ) cmd_config.extend( [ self.test_file_path, f"""--output_dir={self.tmpdir}""", """--partial_train_epoch=1""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() ) UpperCamelCase__ :List[Any] = cmd_config[:-1] UpperCamelCase__ :Tuple = os.path.join(self.tmpdir , """epoch_0""" ) cmd_config.extend( [ f"""--resume_from_checkpoint={resume_from_checkpoint}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() ) def __a ( self :List[str] ): UpperCamelCase__ :List[str] = os.path.join(self.test_scripts_folder , """test_peak_memory_usage.py""" ) UpperCamelCase__ :Optional[int] = [ """accelerate""", """launch""", """--num_processes=2""", """--num_machines=1""", """--machine_rank=0""", ] for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): UpperCamelCase__ :Optional[int] = cmd.copy() if "fp16" in spec: cmd_config.extend(["""--mixed_precision=fp16"""] ) else: cmd_config.extend(["""--mixed_precision=no"""] ) if "multi_gpu" in spec: continue else: cmd_config.extend(["""--use_fsdp"""] ) for i, strategy in enumerate(lowerCamelCase__ ): if strategy.lower() in spec: 
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" ) break if "cpu_offload" in spec: cmd_config.append("""--fsdp_offload_params=True""" ) for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" ) break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" ) elif policy == "SIZE_BASED_WRAP": cmd_config.append("""--fsdp_min_num_params=2000""" ) cmd_config.extend( [ self.test_file_path, f"""--output_dir={self.tmpdir}""", f"""--peak_memory_upper_bound={peak_mem_upper_bound}""", f"""--n_train={self.n_train}""", f"""--n_val={self.n_val}""", ] ) with patch_environment(omp_num_threads=1 ): execute_subprocess_async(lowerCamelCase__ , env=os.environ.copy() )
45
"""Constant-stretch (histogram-based) contrast enhancement for a grayscale image.

Reads an image with OpenCV, builds its intensity histogram with matplotlib,
computes a cumulative remapping table, rewrites the pixels in place and saves
the result to ``output_data/output.jpg``.
"""
import copy
import os

# Fix: the original text imported a nonexistent module ``cva``; OpenCV's
# Python binding is ``cv2``.
import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        # self.img        : working image array (set by stretch())
        # self.original_image : untouched deep copy for side-by-side display
        # self.last_list  : remapping table, one output level per input level
        # self.rem        : rounding remainder carried between levels
        # self.L          : number of gray levels
        # self.sk         : cumulative probability sum
        # self.k          : total pixel count (histogram sum)
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_file):
        """Compute the remapping table from the histogram and rewrite the image."""
        self.img = cv2.imread(input_file, 0)  # 0 -> load as grayscale
        self.original_image = copy.deepcopy(self.img)
        # plt.hist returns (counts, bin_edges, patches); only counts are used.
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                # NOTE(review): ``last % last`` is 0 for any non-zero ``last``;
                # possibly ``last % 1`` (fractional part) was intended — kept
                # as-is to preserve behavior, confirm against the reference.
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        # rows = total element count / row width; cols = row width.
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        """Plot the histogram of the (possibly already stretched) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display input and output images for 5 seconds."""
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    # Fix: the original joined with basename(__file__) (the script's own file
    # name); dirname(__file__) is the directory containing image_data/.
    file_path = os.path.join(os.path.dirname(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
3
0
"""Simplified DES (S-DES): 10-bit key, 8-bit blocks, operating on bit-strings
such as "1010000010". Tables are 1-indexed as in the standard description.

Fix: all five functions had been renamed to ``a`` (each definition shadowing
the previous one) while the driver called ``apply_table``/``left_shift``/
``xor``/``apply_sbox``/``function`` — every one an undefined name. The
constant tables are defined at module level so ``function`` can resolve
``p4_table``; only the interactive driver sits behind the __main__ guard.
"""


def apply_table(inp, table):
    """Return the bits of `inp` selected/permuted by the 1-indexed positions in `table`."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift of a bit-string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up 4-bit `data` in S-box `s`: outer bits select the row, middle bits the column."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


# Standard S-DES permutation/selection tables (1-indexed bit positions).
p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
p4_table = [2, 4, 3, 1]
IP = [2, 6, 3, 1, 4, 8, 5, 7]
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
expansion = [4, 1, 2, 3, 2, 3, 4, 1]
s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]


def function(expansion, s0, s1, key, message):
    """One Feistel round f_K: expand+XOR the right half with the round key,
    pass through the S-boxes, permute with P4 and XOR into the left half."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    # bin() drops leading zeros, so S-box outputs may be 1 bit; pad to 2.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input('Enter 10 bit key: ')
    message = input('Enter 8 bit message: ')

    # Key generation: derive the two 8-bit round keys from the 10-bit key.
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left_shift(left))
    right = left_shift(left_shift(right))
    key2 = apply_table(left + right, p8_table)

    # Encryption: IP, round with key1, swap halves, round with key2, IP^-1.
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print('Cipher text is:', CT)

    # Decryption: identical structure with round keys in reverse order.
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print('Plain text after decypting is:', PT)
719
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the (unsigned) area between `fnc`, the x-axis and the
    verticals at `x_start`/`x_end` with the trapezoidal rule.

    Fix: the function had been renamed to ``a`` while the demo below called
    the undefined name ``trapezoidal_area``; likewise the sample curve was
    named ``a`` but used as ``f``.

    :param fnc:     function of one variable to integrate
    :param x_start: left end of the interval
    :param x_end:   right end of the interval
    :param steps:   number of equal-width segments (more steps = more accurate)
    :return:        accumulated trapezoid area as a float
    """
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Approximate each small segment of the curve as linear and add the
        # trapezoid area; abs() makes the contribution sign-independent.
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        area += abs(fxb + fxa) * (xb - xa) / 2
        # Slide the window one step to the right.
        xa = xb
        fxa = fxb
    return area


if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print('f(x) = x^3 + x^2')
    print('The area between the curve, x = -5, x = 5 and the x axis is:')
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
409
0
"""Tests for the BARTpho tokenizer (SentencePiece BPE with a monolingual vocab)."""
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")


class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Exercises BartphoTokenizer against a tiny fixture SentencePiece model.

    Fixes: the base class referenced the undefined alias ``_UpperCAmelCase``
    (the import shows ``TokenizerTesterMixin``); all four methods shared one
    name so only the last survived — unittest requires ``setUp``/``test_*``;
    the three class attributes the mixin reads were likewise all bound to a
    single placeholder name.
    """

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        """Write a tiny monolingual vocab file and save a tokenizer to tmpdir."""
        super().setUp()
        vocab = ['▁This', '▁is', '▁a', '▁t', 'est']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['monolingual_vocab_file'])
        with open(self.monolingual_vocab_file, 'w', encoding='utf-8') as fp:
            for token in vocab_tokens:
                fp.write(f'{token} {vocab_tokens[token]}\n')
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        """Reload the tokenizer saved by setUp with the special-token map applied."""
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Provide a raw/expected text pair for the mixin's generic tests."""
        input_text = 'This is a là test'
        output_text = 'This is a<unk><unk> test'
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and token->id conversion on a string with OOV pieces."""
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = 'This is a là test'
        bpe_tokens = '▁This ▁is ▁a ▁l à ▁t est'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        # Unknown pieces ('▁l', 'à' and the explicit unk token) map to id 3.
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
13
"""simple docstring""" from sklearn.metrics import fa_score import datasets lowerCAmelCase__ = ''' The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) ''' lowerCAmelCase__ = ''' Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`. - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives. - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. 
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {\'f1\': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results[\'f1\'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric("f1") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results[\'f1\'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro") >>> print(round(results[\'f1\'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted") >>> print(round(results[\'f1\'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {\'f1\': array([0.8, 0. , 0. 
])} ''' lowerCAmelCase__ = ''' @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def lowercase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , ) def lowercase__ ( self , snake_case__ , snake_case__ , snake_case__=None , snake_case__=1 , snake_case__="binary" , snake_case__=None ): """simple docstring""" lowerCAmelCase : Optional[Any] = fa_score( snake_case__ , snake_case__ , labels=snake_case__ , pos_label=snake_case__ , average=snake_case__ , sample_weight=snake_case__ ) return {"f1": float(snake_case__ ) if score.size == 1 else score}
645
0
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class lowerCAmelCase__( __lowercase ): '''simple docstring''' __snake_case = 42 __snake_case = 42 class lowerCAmelCase__( nn.Module ): '''simple docstring''' __snake_case = 42 __snake_case = (1_6, 3_2, 9_6, 2_5_6) __snake_case = jnp.floataa def UpperCamelCase_ ( self ) -> Any: _SCREAMING_SNAKE_CASE : List[str] = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i in range(len(self.block_out_channels ) - 1 ): _SCREAMING_SNAKE_CASE : Any = self.block_out_channels[i] _SCREAMING_SNAKE_CASE : List[Any] = self.block_out_channels[i + 1] _SCREAMING_SNAKE_CASE : Any = nn.Conv( UpperCAmelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE : Optional[Any] = nn.Conv( UpperCAmelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE : Union[str, Any] = blocks _SCREAMING_SNAKE_CASE : Any = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , __lowerCamelCase ) -> List[str]: _SCREAMING_SNAKE_CASE : Optional[int] = self.conv_in(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE : Dict = nn.silu(UpperCAmelCase__ ) for block in self.blocks: 
_SCREAMING_SNAKE_CASE : Any = block(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE : Dict = nn.silu(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE : List[str] = self.conv_out(UpperCAmelCase__ ) return embedding @flax_register_to_config class lowerCAmelCase__( nn.Module , __lowercase , __lowercase ): '''simple docstring''' __snake_case = 3_2 __snake_case = 4 __snake_case = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) __snake_case = False __snake_case = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) __snake_case = 2 __snake_case = 8 __snake_case = None __snake_case = 1_2_8_0 __snake_case = 0.0 __snake_case = False __snake_case = jnp.floataa __snake_case = True __snake_case = 0 __snake_case = "rgb" __snake_case = (1_6, 3_2, 9_6, 2_5_6) def UpperCamelCase_ ( self , __lowerCamelCase ) -> Tuple: _SCREAMING_SNAKE_CASE : Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size) _SCREAMING_SNAKE_CASE : List[str] = jnp.zeros(UpperCAmelCase__ , dtype=jnp.floataa ) _SCREAMING_SNAKE_CASE : Tuple = jnp.ones((1,) , dtype=jnp.intaa ) _SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) _SCREAMING_SNAKE_CASE : Any = (1, 3, self.sample_size * 8, self.sample_size * 8) _SCREAMING_SNAKE_CASE : Tuple = jnp.zeros(UpperCAmelCase__ , dtype=jnp.floataa ) _SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE : Any = {'''params''': params_rng, '''dropout''': dropout_rng} return self.init(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )["params"] def UpperCamelCase_ ( self ) -> Tuple: _SCREAMING_SNAKE_CASE : Optional[Any] = self.block_out_channels _SCREAMING_SNAKE_CASE : int = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. 
# The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. _SCREAMING_SNAKE_CASE : int = self.num_attention_heads or self.attention_head_dim # input _SCREAMING_SNAKE_CASE : Any = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time _SCREAMING_SNAKE_CASE : Dict = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) _SCREAMING_SNAKE_CASE : int = FlaxTimestepEmbedding(UpperCAmelCase__ , dtype=self.dtype ) _SCREAMING_SNAKE_CASE : List[Any] = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) _SCREAMING_SNAKE_CASE : Any = self.only_cross_attention if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _SCREAMING_SNAKE_CASE : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _SCREAMING_SNAKE_CASE : Optional[int] = (num_attention_heads,) * len(self.down_block_types ) # down _SCREAMING_SNAKE_CASE : Any = [] _SCREAMING_SNAKE_CASE : List[str] = [] _SCREAMING_SNAKE_CASE : Any = block_out_channels[0] _SCREAMING_SNAKE_CASE : int = nn.Conv( UpperCAmelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(UpperCAmelCase__ ) for i, down_block_type in enumerate(self.down_block_types ): _SCREAMING_SNAKE_CASE : Union[str, Any] = output_channel _SCREAMING_SNAKE_CASE : Any = block_out_channels[i] 
_SCREAMING_SNAKE_CASE : Dict = i == len(UpperCAmelCase__ ) - 1 if down_block_type == "CrossAttnDownBlock2D": _SCREAMING_SNAKE_CASE : Tuple = FlaxCrossAttnDownBlockaD( in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: _SCREAMING_SNAKE_CASE : int = FlaxDownBlockaD( in_channels=UpperCAmelCase__ , out_channels=UpperCAmelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(UpperCAmelCase__ ) for _ in range(self.layers_per_block ): _SCREAMING_SNAKE_CASE : Dict = nn.Conv( UpperCAmelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(UpperCAmelCase__ ) if not is_final_block: _SCREAMING_SNAKE_CASE : Optional[int] = nn.Conv( UpperCAmelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE : str = down_blocks _SCREAMING_SNAKE_CASE : List[str] = controlnet_down_blocks # mid _SCREAMING_SNAKE_CASE : str = block_out_channels[-1] _SCREAMING_SNAKE_CASE : str = FlaxUNetMidBlockaDCrossAttn( in_channels=UpperCAmelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) _SCREAMING_SNAKE_CASE : Tuple = nn.Conv( UpperCAmelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self , __lowerCamelCase , __lowerCamelCase , 
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = 1.0 , __lowerCamelCase = True , __lowerCamelCase = False , ) -> int: _SCREAMING_SNAKE_CASE : Tuple = self.controlnet_conditioning_channel_order if channel_order == "bgr": _SCREAMING_SNAKE_CASE : Dict = jnp.flip(UpperCAmelCase__ , axis=1 ) # 1. time if not isinstance(UpperCAmelCase__ , jnp.ndarray ): _SCREAMING_SNAKE_CASE : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(UpperCAmelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0: _SCREAMING_SNAKE_CASE : Union[str, Any] = timesteps.astype(dtype=jnp.floataa ) _SCREAMING_SNAKE_CASE : List[Any] = jnp.expand_dims(UpperCAmelCase__ , 0 ) _SCREAMING_SNAKE_CASE : Optional[Any] = self.time_proj(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.time_embedding(UpperCAmelCase__ ) # 2. pre-process _SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.transpose(UpperCAmelCase__ , (0, 2, 3, 1) ) _SCREAMING_SNAKE_CASE : Optional[Any] = self.conv_in(UpperCAmelCase__ ) _SCREAMING_SNAKE_CASE : List[Any] = jnp.transpose(UpperCAmelCase__ , (0, 2, 3, 1) ) _SCREAMING_SNAKE_CASE : Optional[int] = self.controlnet_cond_embedding(UpperCAmelCase__ ) sample += controlnet_cond # 3. down _SCREAMING_SNAKE_CASE : Optional[Any] = (sample,) for down_block in self.down_blocks: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _SCREAMING_SNAKE_CASE : Optional[int] = down_block(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , deterministic=not train ) else: _SCREAMING_SNAKE_CASE : Union[str, Any] = down_block(UpperCAmelCase__ , UpperCAmelCase__ , deterministic=not train ) down_block_res_samples += res_samples # 4. mid _SCREAMING_SNAKE_CASE : Dict = self.mid_block(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , deterministic=not train ) # 5. 
contronet blocks _SCREAMING_SNAKE_CASE : List[str] = () for down_block_res_sample, controlnet_block in zip(UpperCAmelCase__ , self.controlnet_down_blocks ): _SCREAMING_SNAKE_CASE : Union[str, Any] = controlnet_block(UpperCAmelCase__ ) controlnet_down_block_res_samples += (down_block_res_sample,) _SCREAMING_SNAKE_CASE : Optional[Any] = controlnet_down_block_res_samples _SCREAMING_SNAKE_CASE : Dict = self.controlnet_mid_block(UpperCAmelCase__ ) # 6. scaling _SCREAMING_SNAKE_CASE : Tuple = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=UpperCAmelCase__ , mid_block_res_sample=UpperCAmelCase__ )
718
from PIL import Image def lowerCamelCase__ (__lowerCamelCase ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = image.size _SCREAMING_SNAKE_CASE : Tuple = 0 _SCREAMING_SNAKE_CASE : Dict = image.load() for i in range(__lowerCamelCase ): for j in range(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : List[Any] = pixels[j, i] mean += pixel mean //= width * height for j in range(__lowerCamelCase ): for i in range(__lowerCamelCase ): _SCREAMING_SNAKE_CASE : int = 255 if pixels[i, j] > mean else 0 return image if __name__ == "__main__": UpperCamelCase__ =mean_threshold(Image.open('path_to_image').convert('L')) image.save('output_image_path')
381
0
import os import sys lowercase = os.path.join(os.path.dirname(__file__), """src""") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) lowercase = [ """torch""", """numpy""", """tokenizers""", """filelock""", """requests""", """tqdm""", """regex""", """sentencepiece""", """sacremoses""", """importlib_metadata""", """huggingface_hub""", ] @add_start_docstrings(AutoConfig.__doc__ ) def lowerCamelCase_ ( *UpperCamelCase__ : Optional[Any], **UpperCamelCase__ : Union[str, Any] ): '''simple docstring''' return AutoConfig.from_pretrained(*UpperCamelCase__, **UpperCamelCase__ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def lowerCamelCase_ ( *UpperCamelCase__ : Optional[Any], **UpperCamelCase__ : Tuple ): '''simple docstring''' return AutoTokenizer.from_pretrained(*UpperCamelCase__, **UpperCamelCase__ ) @add_start_docstrings(AutoModel.__doc__ ) def lowerCamelCase_ ( *UpperCamelCase__ : str, **UpperCamelCase__ : List[str] ): '''simple docstring''' return AutoModel.from_pretrained(*UpperCamelCase__, **UpperCamelCase__ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def lowerCamelCase_ ( *UpperCamelCase__ : Tuple, **UpperCamelCase__ : Optional[int] ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__, **UpperCamelCase__ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def lowerCamelCase_ ( *UpperCamelCase__ : List[str], **UpperCamelCase__ : str ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__, **UpperCamelCase__ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def lowerCamelCase_ ( *UpperCamelCase__ : Any, **UpperCamelCase__ : Dict ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__, **UpperCamelCase__ ) 
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def lowerCamelCase_ ( *UpperCamelCase__ : Tuple, **UpperCamelCase__ : List[Any] ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__, **UpperCamelCase__ )
240
def lowerCamelCase_ ( UpperCamelCase__ : int, UpperCamelCase__ : int ): '''simple docstring''' if b == 0: return 1 if (b % 2) == 0: return actual_power(UpperCamelCase__, int(b / 2 ) ) * actual_power(UpperCamelCase__, int(b / 2 ) ) else: return a * actual_power(UpperCamelCase__, int(b / 2 ) ) * actual_power(UpperCamelCase__, int(b / 2 ) ) def lowerCamelCase_ ( UpperCamelCase__ : int, UpperCamelCase__ : int ): '''simple docstring''' if b < 0: return 1 / actual_power(UpperCamelCase__, UpperCamelCase__ ) return actual_power(UpperCamelCase__, UpperCamelCase__ ) if __name__ == "__main__": print(power(-2, -3))
240
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase = logging.get_logger(__name__) _lowerCamelCase = { """BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""", # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class _SCREAMING_SNAKE_CASE (UpperCamelCase ): lowerCAmelCase = """altclip_text_model""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any]=2_5_0_0_0_2 , UpperCamelCase : int=1_0_2_4 , UpperCamelCase : Dict=2_4 , UpperCamelCase : Dict=1_6 , UpperCamelCase : Dict=4_0_9_6 , UpperCamelCase : List[str]="gelu" , UpperCamelCase : Any=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Union[str, Any]=5_1_4 , UpperCamelCase : List[Any]=1 , UpperCamelCase : Any=0.0_2 , UpperCamelCase : Any=0.0_2 , UpperCamelCase : Union[str, Any]=1E-05 , UpperCamelCase : List[str]=1 , UpperCamelCase : Dict=0 , UpperCamelCase : Tuple=2 , UpperCamelCase : Union[str, Any]="absolute" , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=7_6_8 , **UpperCamelCase : str , )->Any: super().__init__(pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase ) __SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Any = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = hidden_act __SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Dict = max_position_embeddings __SCREAMING_SNAKE_CASE : List[str] = type_vocab_size __SCREAMING_SNAKE_CASE : Any = initializer_range __SCREAMING_SNAKE_CASE : int = initializer_factor __SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps __SCREAMING_SNAKE_CASE : 
Optional[Any] = position_embedding_type __SCREAMING_SNAKE_CASE : List[str] = use_cache __SCREAMING_SNAKE_CASE : Any = project_dim class _SCREAMING_SNAKE_CASE (UpperCamelCase ): lowerCAmelCase = """altclip_vision_model""" def __init__( self : Dict , UpperCamelCase : Tuple=7_6_8 , UpperCamelCase : Optional[int]=3_0_7_2 , UpperCamelCase : Optional[Any]=5_1_2 , UpperCamelCase : int=1_2 , UpperCamelCase : List[Any]=1_2 , UpperCamelCase : List[str]=3 , UpperCamelCase : List[Any]=2_2_4 , UpperCamelCase : int=3_2 , UpperCamelCase : Optional[Any]="quick_gelu" , UpperCamelCase : List[Any]=1E-5 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Tuple=0.0_2 , UpperCamelCase : str=1.0 , **UpperCamelCase : Tuple , )->Optional[int]: super().__init__(**UpperCamelCase ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size __SCREAMING_SNAKE_CASE : Dict = intermediate_size __SCREAMING_SNAKE_CASE : Optional[int] = projection_dim __SCREAMING_SNAKE_CASE : int = num_hidden_layers __SCREAMING_SNAKE_CASE : str = num_attention_heads __SCREAMING_SNAKE_CASE : Tuple = num_channels __SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size __SCREAMING_SNAKE_CASE : str = image_size __SCREAMING_SNAKE_CASE : List[str] = initializer_range __SCREAMING_SNAKE_CASE : int = initializer_factor __SCREAMING_SNAKE_CASE : Tuple = attention_dropout __SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act @classmethod def __snake_case ( cls : Union[str, Any] , UpperCamelCase : Union[str, os.PathLike] , **UpperCamelCase : Optional[Any] )->"PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = cls.get_config_dict(UpperCamelCase , **UpperCamelCase ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("model_type" ) == "altclip": __SCREAMING_SNAKE_CASE : List[str] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and 
config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCamelCase , **UpperCamelCase ) class _SCREAMING_SNAKE_CASE (UpperCamelCase ): lowerCAmelCase = """altclip""" lowerCAmelCase = True def __init__( self : Optional[Any] , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=None , UpperCamelCase : Union[str, Any]=7_6_8 , UpperCamelCase : Optional[Any]=2.6_5_9_2 , **UpperCamelCase : int )->Any: # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). __SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("text_config_dict" , UpperCamelCase ) __SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs.pop("vision_config_dict" , UpperCamelCase ) super().__init__(**UpperCamelCase ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: __SCREAMING_SNAKE_CASE : Tuple = {} # This is the complete result when using `text_config_dict`. __SCREAMING_SNAKE_CASE : Optional[Any] = AltCLIPTextConfig(**UpperCamelCase ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. 
for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: __SCREAMING_SNAKE_CASE : List[str] = ( F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """ F"""The value `text_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """ F"""value `text_config[\"{key}\"]` will be overriden.""" ) logger.warning(UpperCamelCase ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: __SCREAMING_SNAKE_CASE : int = {} # This is the complete result when using `vision_config_dict`. __SCREAMING_SNAKE_CASE : Optional[int] = AltCLIPVisionConfig(**UpperCamelCase ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: __SCREAMING_SNAKE_CASE : List[Any] = { str(UpperCamelCase ): value for key, value in _vision_config_dict["id2label"].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: __SCREAMING_SNAKE_CASE : Dict = ( F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """ F"""values. 
The value `vision_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: __SCREAMING_SNAKE_CASE : Dict = ( F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """ F"""The value `vision_config[\"{key}\"]` will be overriden.""" ) logger.warning(UpperCamelCase ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: __SCREAMING_SNAKE_CASE : List[Any] = {} logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." ) if vision_config is None: __SCREAMING_SNAKE_CASE : List[str] = {} logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." ) __SCREAMING_SNAKE_CASE : Tuple = AltCLIPTextConfig(**UpperCamelCase ) __SCREAMING_SNAKE_CASE : Tuple = AltCLIPVisionConfig(**UpperCamelCase ) __SCREAMING_SNAKE_CASE : List[Any] = projection_dim __SCREAMING_SNAKE_CASE : int = logit_scale_init_value __SCREAMING_SNAKE_CASE : List[Any] = 1.0 @classmethod def __snake_case ( cls : Dict , UpperCamelCase : AltCLIPTextConfig , UpperCamelCase : AltCLIPVisionConfig , **UpperCamelCase : int )->Dict: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase ) def __snake_case ( self : str )->List[str]: __SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE : Dict = self.text_config.to_dict() __SCREAMING_SNAKE_CASE : Optional[int] = self.vision_config.to_dict() __SCREAMING_SNAKE_CASE : int = self.__class__.model_type return output
447
import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate _lowerCamelCase = trt.Logger(trt.Logger.WARNING) _lowerCamelCase = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) _lowerCamelCase = logging.getLogger(__name__) _lowerCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--onnx_model_path""", default=None, type=str, required=True, help="""Path to ONNX model: """, ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model checkpoints and predictions will be written.""", ) # Other parameters parser.add_argument( """--tokenizer_name""", default="""""", type=str, required=True, help="""Pretrained tokenizer name or path if not the same as model_name""", ) parser.add_argument( """--version_2_with_negative""", action="""store_true""", help="""If true, the SQuAD examples contain some that do not have an answer.""", ) parser.add_argument( """--null_score_diff_threshold""", type=float, default=0.0, help="""If null_score - best_non_null is greater than the threshold predict null.""", ) parser.add_argument( """--max_seq_length""", default=384, type=int, help=( """The maximum total input sequence length after WordPiece tokenization. 
Sequences """ """longer than this will be truncated, and sequences shorter than this will be padded.""" ), ) parser.add_argument( """--doc_stride""", default=128, type=int, help="""When splitting up a long document into chunks, how much stride to take between chunks.""", ) parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""") parser.add_argument( """--n_best_size""", default=20, type=int, help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""", ) parser.add_argument( """--max_answer_length""", default=30, type=int, help=( """The maximum length of an answer that can be generated. This is needed because the start """ """and end predictions are not conditioned on one another.""" ), ) parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""") parser.add_argument( """--dataset_name""", type=str, default=None, required=True, help="""The name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--dataset_config_name""", type=str, default=None, help="""The configuration name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data.""" ) parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""") parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision instead of 32-bit""", ) parser.add_argument( """--int8""", action="""store_true""", help="""Whether to use INT8""", ) _lowerCamelCase = parser.parse_args() if args.tokenizer_name: _lowerCamelCase = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( """You are instantiating a new tokenizer from scratch. 
This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) logger.info("""Training/evaluation parameters %s""", args) _lowerCamelCase = args.per_device_eval_batch_size _lowerCamelCase = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties _lowerCamelCase = True _lowerCamelCase = """temp_engine/bert-fp32.engine""" if args.fpaa: _lowerCamelCase = """temp_engine/bert-fp16.engine""" if args.inta: _lowerCamelCase = """temp_engine/bert-int8.engine""" # import ONNX file if not os.path.exists("""temp_engine"""): os.makedirs("""temp_engine""") _lowerCamelCase = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, """rb""") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network _lowerCamelCase = [network.get_input(i) for i in range(network.num_inputs)] _lowerCamelCase = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: _lowerCamelCase = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) _lowerCamelCase = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) _lowerCamelCase = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, """wb""") as f: f.write(engine.serialize()) def _lowerCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : str , 
__lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(inputs["input_ids"] , dtype=np.intaa ) __SCREAMING_SNAKE_CASE : Any = np.asarray(inputs["attention_mask"] , dtype=np.intaa ) __SCREAMING_SNAKE_CASE : str = np.asarray(inputs["token_type_ids"] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , __lowerCamelCase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , __lowerCamelCase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , __lowerCamelCase ) # start time __SCREAMING_SNAKE_CASE : str = time.time() # Run inference context.execute_async( bindings=[int(__lowerCamelCase ) for d_inp in d_inputs] + [int(__lowerCamelCase ), int(__lowerCamelCase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) cuda.memcpy_dtoh_async(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Synchronize the stream and take time stream.synchronize() # end time __SCREAMING_SNAKE_CASE : List[Any] = time.time() __SCREAMING_SNAKE_CASE : List[Any] = end_time - start_time __SCREAMING_SNAKE_CASE : Tuple = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. _lowerCamelCase = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. _lowerCamelCase = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("""Evaluation requires a dataset name""") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. _lowerCamelCase = raw_datasets["""validation"""].column_names _lowerCamelCase = """question""" if """question""" in column_names else column_names[0] _lowerCamelCase = """context""" if """context""" in column_names else column_names[1] _lowerCamelCase = """answers""" if """answers""" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
_lowerCamelCase = tokenizer.padding_side == """right""" if args.max_seq_length > tokenizer.model_max_length: logger.warning( f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' ) _lowerCamelCase = min(args.max_seq_length, tokenizer.model_max_length) def _lowerCAmelCase ( __lowerCamelCase : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. __SCREAMING_SNAKE_CASE : List[str] = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=__lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , padding="max_length" , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenized_examples.pop("overflow_to_sample_mapping" ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. __SCREAMING_SNAKE_CASE : List[str] = [] for i in range(len(tokenized_examples["input_ids"] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). 
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenized_examples.sequence_ids(__lowerCamelCase ) __SCREAMING_SNAKE_CASE : Union[str, Any] = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. __SCREAMING_SNAKE_CASE : str = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. __SCREAMING_SNAKE_CASE : Optional[Any] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i] ) ] return tokenized_examples _lowerCamelCase = raw_datasets["""validation"""] # Validation Feature Creation _lowerCamelCase = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="""Running tokenizer on validation dataset""", ) _lowerCamelCase = default_data_collator _lowerCamelCase = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""]) _lowerCamelCase = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def _lowerCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : int="eval" ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = postprocess_qa_predictions( examples=__lowerCamelCase , features=__lowerCamelCase , predictions=__lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=__lowerCamelCase , ) # Format the result to the format the metric expects. 
if args.version_2_with_negative: __SCREAMING_SNAKE_CASE : Optional[int] = [ {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() ] else: __SCREAMING_SNAKE_CASE : Union[str, Any] = [{"id": k, "prediction_text": v} for k, v in predictions.items()] __SCREAMING_SNAKE_CASE : Optional[Any] = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=__lowerCamelCase , label_ids=__lowerCamelCase ) _lowerCamelCase = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""") # Evaluation! logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path) with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def _lowerCAmelCase ( __lowerCamelCase : Optional[int] ): """simple docstring""" return trt.volume(engine.get_binding_shape(__lowerCamelCase ) ) * engine.get_binding_dtype(__lowerCamelCase ).itemsize # Allocate device memory for inputs and outputs. _lowerCamelCase = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer _lowerCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) _lowerCamelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) _lowerCamelCase = cuda.mem_alloc(h_outputa.nbytes) _lowerCamelCase = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
_lowerCamelCase = cuda.Stream() # Evaluation logger.info("""***** Running Evaluation *****""") logger.info(f''' Num examples = {len(eval_dataset)}''') logger.info(f''' Batch size = {args.per_device_eval_batch_size}''') _lowerCamelCase = 0.0 _lowerCamelCase = 0 _lowerCamelCase = timeit.default_timer() _lowerCamelCase = None for step, batch in enumerate(eval_dataloader): _lowerCamelCase , _lowerCamelCase = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 _lowerCamelCase , _lowerCamelCase = outputs _lowerCamelCase = torch.tensor(start_logits) _lowerCamelCase = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered _lowerCamelCase = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) _lowerCamelCase = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) _lowerCamelCase = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) _lowerCamelCase = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: _lowerCamelCase = nested_truncate(all_preds, len(eval_dataset)) _lowerCamelCase = timeit.default_timer() - start_time logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1000 / niter)) logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1000)) logger.info("""Total Number of Inference = %d""", niter) _lowerCamelCase = post_processing_function(eval_examples, eval_dataset, all_preds) _lowerCamelCase = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f'''Evaluation metrics: {eval_metric}''')
447
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a_ : List[Any] = { 'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'], 'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'], 'processing_wav2vec2': ['Wav2Vec2Processor'], 'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : str = [ 'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Wav2Vec2ForAudioFrameClassification', 'Wav2Vec2ForCTC', 'Wav2Vec2ForMaskedLM', 'Wav2Vec2ForPreTraining', 'Wav2Vec2ForSequenceClassification', 'Wav2Vec2ForXVector', 'Wav2Vec2Model', 'Wav2Vec2PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : List[str] = [ 'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWav2Vec2ForCTC', 'TFWav2Vec2Model', 'TFWav2Vec2PreTrainedModel', 'TFWav2Vec2ForSequenceClassification', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ : Optional[Any] = [ 'FlaxWav2Vec2ForCTC', 'FlaxWav2Vec2ForPreTraining', 'FlaxWav2Vec2Model', 'FlaxWav2Vec2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, 
WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys a_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
73
"""Trivial a1z26 cipher: letter <-> 1-based alphabet position."""
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Encode each lowercase letter of *plain* to its 1-based alphabet position.

    >>> encode("abc")
    [1, 2, 3]
    """
    # ord("a") == 97, so subtracting 96 maps "a" -> 1 ... "z" -> 26
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of :func:`encode`: map 1-based positions back to letters.

    >>> decode([1, 2, 3])
    'abc'
    """
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    """Read a line from stdin, print its encoding and the round-trip decoding."""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
494
0
def bead_sort(sequence: list) -> list:
    """Sort a list of non-negative integers in place using bead (gravity) sort.

    Each pass lets "beads" fall between adjacent rods; after len(sequence)
    passes the list is sorted ascending. Returns the (mutated) input list.

    Raises:
        TypeError: if any element is not a non-negative integer.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            # let the excess beads on the upper rod drop onto the lower rod
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
241
from functools import reduce

# Project Euler problem 8: the 1000-digit number, stored as a string so
# adjacent digit windows can be sliced directly.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in *n*.

    >>> solution("9999999999999")
    2541865828329
    """
    return max(
        # mypy cannot properly interpret reduce; the running product is kept
        # as a string and re-parsed each step, matching the original approach
        int(reduce(lambda acc, digit: str(int(acc) * int(digit)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
241
1
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ : Tuple = logging.get_logger(__name__) UpperCAmelCase_ : str = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) UpperCAmelCase_ : Optional[int] = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), 
("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ : Optional[Any] = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ : Optional[Any] = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) UpperCAmelCase_ : str = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) UpperCAmelCase_ : Optional[int] = OrderedDict( [ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), ] ) UpperCAmelCase_ : str = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", 
"FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) UpperCAmelCase_ : str = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) UpperCAmelCase_ : List[Any] = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) UpperCAmelCase_ : Union[str, Any] = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", 
"FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) UpperCAmelCase_ : Any = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) UpperCAmelCase_ : str = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) UpperCAmelCase_ : Optional[Any] = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) UpperCAmelCase_ : int = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) UpperCAmelCase_ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ : List[str] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ : Union[str, Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Any = _LazyAutoMapping( CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ : Dict = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ : Optional[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ : Tuple = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ : List[Any] = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ : str = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : Optional[int] = FLAX_MODEL_MAPPING UpperCAmelCase_ : List[str] = auto_class_update(FlaxAutoModel) class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ : Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : str = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ : Union[str, Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ : int = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ : Optional[int] = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : str = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ : int = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class UpperCamelCase ( _BaseAutoModelClass ): 
lowerCAmelCase : List[Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ : Dict = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : List[str] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ : Dict = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : Optional[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ : str = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : Optional[Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : Tuple = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ : Optional[Any] = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : List[str] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class UpperCamelCase ( _BaseAutoModelClass ): lowerCAmelCase : Optional[int] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ : int = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
491
from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer a_ = logging.get_logger(__name__) a_ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} a_ = { """vocab_file""": { """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json""" }, """merges_file""": { """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt""" }, } a_ = {"""allegro/herbert-base-cased""": 514} a_ = {} class UpperCAmelCase__ ( snake_case ): """simple docstring""" lowerCAmelCase__ : List[str] = VOCAB_FILES_NAMES lowerCAmelCase__ : int = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ : Tuple = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ : Tuple = HerbertTokenizer def __init__( self: Optional[Any] , __lowerCAmelCase: List[str]=None , __lowerCAmelCase: Optional[int]=None , __lowerCAmelCase: List[str]=None , __lowerCAmelCase: str="<s>" , __lowerCAmelCase: List[str]="<unk>" , __lowerCAmelCase: Optional[int]="<pad>" , __lowerCAmelCase: Optional[Any]="<mask>" , __lowerCAmelCase: Union[str, Any]="</s>" , **__lowerCAmelCase: List[Any] , ) -> Tuple: '''simple docstring''' super().__init__( __lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , **__lowerCAmelCase , ) def _UpperCAmelCase ( self: Tuple , __lowerCAmelCase: List[int] , __lowerCAmelCase: Optional[List[int]] = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase = [self.cls_token_id] __UpperCAmelCase = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + 
token_ids_a + sep def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: List[int] , __lowerCAmelCase: Optional[List[int]] = None , __lowerCAmelCase: bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowerCAmelCase , token_ids_a=__lowerCAmelCase , already_has_special_tokens=__lowerCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(__lowerCAmelCase )) + [1] return [1] + ([0] * len(__lowerCAmelCase )) + [1] + ([0] * len(__lowerCAmelCase )) + [1] def _UpperCAmelCase ( self: int , __lowerCAmelCase: List[int] , __lowerCAmelCase: Optional[List[int]] = None ) -> List[int]: '''simple docstring''' __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self: Dict , __lowerCAmelCase: str , __lowerCAmelCase: Optional[str] = None ) -> Tuple[str]: '''simple docstring''' __UpperCAmelCase = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase ) return tuple(__lowerCAmelCase )
221
0
'''simple docstring''' import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex snake_case = logging.getLogger(__name__) class lowerCAmelCase : def __init__( self : Optional[int] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = False def _A ( self : Dict , a__ : Optional[int] , a__ : Optional[int] , a__ : List[Any] , a__ : List[Any] ): '''simple docstring''' if not self.initialized: lowerCAmelCase__ : Union[str, Any] = RagRetriever( a__ , question_encoder_tokenizer=a__ , generator_tokenizer=a__ , index=a__ , init_retrieval=a__ , ) lowerCAmelCase__ : int = True def _A ( self : int ): '''simple docstring''' self.retriever.index.init_index() def _A ( self : Optional[Any] , a__ : Optional[Any] , a__ : Any ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.retriever._main_retrieve(a__ , a__ ) return doc_ids, retrieved_doc_embeds class lowerCAmelCase ( UpperCamelCase_ ): def __init__( self : Dict , a__ : int , a__ : Any , a__ : str , a__ : Dict , a__ : str=None ): '''simple docstring''' if index is not None and index.is_initialized() and len(a__ ) > 0: raise ValueError( "When using Ray for distributed fine-tuning, " "you'll need to provide the paths instead, " "as the dataset and the index are loaded " "separately. More info in examples/rag/use_own_knowledge_dataset.py " ) super().__init__( a__ , question_encoder_tokenizer=a__ , generator_tokenizer=a__ , index=a__ , init_retrieval=a__ , ) lowerCAmelCase__ : Optional[Any] = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(a__ , a__ , a__ , a__ ) for worker in self.retrieval_workers ] ) def _A ( self : Dict ): '''simple docstring''' logger.info("initializing retrieval" ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. 
Load index into this same process. self.index.init_index() def _A ( self : Union[str, Any] , a__ : Tuple , a__ : int ): '''simple docstring''' if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. lowerCAmelCase__ : Optional[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = ray.get(random_worker.retrieve.remote(a__ , a__ ) ) else: lowerCAmelCase__ , lowerCAmelCase__ : int = self._main_retrieve(a__ , a__ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(a__ ) @classmethod def _A ( cls : List[Any] , a__ : int , a__ : Dict=None , **a__ : List[str] ): '''simple docstring''' return super(a__ , cls ).get_tokenizers(a__ , a__ , **a__ ) @classmethod def _A ( cls : int , a__ : Any , a__ : Union[str, Any] , a__ : int=None , **a__ : str ): '''simple docstring''' lowerCAmelCase__ : List[str] = kwargs.pop("config" , a__ ) or RagConfig.from_pretrained(a__ , **a__ ) lowerCAmelCase__ : List[str] = RagTokenizer.from_pretrained(a__ , config=a__ ) lowerCAmelCase__ : List[str] = rag_tokenizer.question_encoder lowerCAmelCase__ : List[Any] = rag_tokenizer.generator if indexed_dataset is not None: lowerCAmelCase__ : str = "custom" lowerCAmelCase__ : Union[str, Any] = CustomHFIndex(config.retrieval_vector_size , a__ ) else: lowerCAmelCase__ : int = cls._build_index(a__ ) return cls( a__ , question_encoder_tokenizer=a__ , generator_tokenizer=a__ , retrieval_workers=a__ , index=a__ , )
568
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() snake_case = logging.get_logger("""transformers.models.speecht5""") def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" hf_model.apply_weight_norm() lowerCAmelCase__ : Tuple = checkpoint["input_conv.weight_g"] lowerCAmelCase__ : Dict = checkpoint["input_conv.weight_v"] lowerCAmelCase__ : Tuple = checkpoint["input_conv.bias"] for i in range(len(config.upsample_rates ) ): lowerCAmelCase__ : int = checkpoint[f'''upsamples.{i}.1.weight_g'''] lowerCAmelCase__ : List[Any] = checkpoint[f'''upsamples.{i}.1.weight_v'''] lowerCAmelCase__ : List[Any] = checkpoint[f'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): lowerCAmelCase__ : List[str] = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g'''] lowerCAmelCase__ : List[Any] = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v'''] lowerCAmelCase__ : List[str] = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias'''] lowerCAmelCase__ : Optional[int] = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g'''] lowerCAmelCase__ : Union[str, Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v'''] lowerCAmelCase__ : Union[str, Any] = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias'''] lowerCAmelCase__ : int = checkpoint["output_conv.1.weight_g"] lowerCAmelCase__ : Optional[int] = checkpoint["output_conv.1.weight_v"] lowerCAmelCase__ : str = checkpoint["output_conv.1.bias"] hf_model.remove_weight_norm() @torch.no_grad() def UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , ): """simple docstring""" if config_path is not None: lowerCAmelCase__ : int = SpeechTaHifiGanConfig.from_pretrained(lowerCamelCase_ ) else: lowerCAmelCase__ : int = 
SpeechTaHifiGanConfig() lowerCAmelCase__ : Tuple = SpeechTaHifiGan(lowerCamelCase_ ) lowerCAmelCase__ : Tuple = torch.load(lowerCamelCase_ ) load_weights(orig_checkpoint["model"]["generator"] , lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ : int = np.load(lowerCamelCase_ ) lowerCAmelCase__ : Union[str, Any] = stats[0].reshape(-1 ) lowerCAmelCase__ : int = stats[1].reshape(-1 ) lowerCAmelCase__ : Optional[Any] = torch.from_numpy(lowerCamelCase_ ).float() lowerCAmelCase__ : List[Any] = torch.from_numpy(lowerCamelCase_ ).float() model.save_pretrained(lowerCamelCase_ ) if repo_id: print("Pushing to the hub..." ) model.push_to_hub(lowerCamelCase_ ) if __name__ == "__main__": snake_case = argparse.ArgumentParser() parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""") parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) snake_case = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
568
1
from __future__ import annotations from typing import Any class A__ : '''simple docstring''' def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : Dict ): """simple docstring""" UpperCamelCase = num_of_nodes UpperCamelCase = [] UpperCamelCase = {} def _SCREAMING_SNAKE_CASE ( self : Tuple , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] ): """simple docstring""" self.m_edges.append([u_node, v_node, weight] ) def _SCREAMING_SNAKE_CASE ( self : Tuple , _SCREAMING_SNAKE_CASE : Union[str, Any] ): """simple docstring""" if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def _SCREAMING_SNAKE_CASE ( self : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ): """simple docstring""" if self.m_component[u_node] != u_node: for k in self.m_component: UpperCamelCase = self.find_component(__SCREAMING_SNAKE_CASE ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple ): """simple docstring""" if component_size[u_node] <= component_size[v_node]: UpperCamelCase = v_node component_size[v_node] += component_size[u_node] self.set_component(__SCREAMING_SNAKE_CASE ) elif component_size[u_node] >= component_size[v_node]: UpperCamelCase = self.find_component(__SCREAMING_SNAKE_CASE ) component_size[u_node] += component_size[v_node] self.set_component(__SCREAMING_SNAKE_CASE ) def _SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" UpperCamelCase = [] UpperCamelCase = 0 UpperCamelCase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) UpperCamelCase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: UpperCamelCase = edge UpperCamelCase = self.m_component[u] UpperCamelCase = self.m_component[v] if u_component 
!= v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): UpperCamelCase = [u, v, w] for edge in minimum_weight_edge: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): UpperCamelCase = edge UpperCamelCase = self.m_component[u] UpperCamelCase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n' ) num_of_components -= 1 UpperCamelCase = [-1] * self.m_num_of_nodes print(f'The total weight of the minimal spanning tree is: {mst_weight}' ) def lowercase__ ( ) -> int: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
280
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return all k-element combinations of ``1..n`` in lexicographic order.

    >>> generate_all_combinations(3, 2)
    [[1, 2], [1, 3], [2, 3]]
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Recursively extend *current_list*, appending each complete state to *total_list*."""
    if level == 0:
        # copy, because current_list keeps being mutated by backtracking
        total_list.append(current_list[:])
        return
    # upper bound leaves enough numbers for the remaining `level - 1` picks
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print one combination per line."""
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
381
0
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results

import re
import subprocess
import sys

# SHA of the commit where the current branch forked from `main`.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# Files changed since the fork point; --diff-filter=d drops deleted files.
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)

# Single alternation of the requested top-level dirs, e.g. "utils|src|tests".
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
# no trailing newline: the output is consumed directly by Makefile commands
print(" ".join(relevant_modified_files), end="")
478
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def _a ( __UpperCamelCase=None , __UpperCamelCase=None ): return field(default_factory=lambda: default , metadata=__UpperCamelCase ) @dataclass class a__ : lowerCamelCase__: str = field( metadata={"""help""": """The csv file to plot."""} , ) lowerCamelCase__: bool = field( default=lowerCAmelCase_ , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , ) lowerCamelCase__: bool = field( default=lowerCAmelCase_ , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , ) lowerCamelCase__: bool = field( default=lowerCAmelCase_ , metadata={"""help""": """Disable logarithmic scale when plotting"""} , ) lowerCamelCase__: bool = field( default=lowerCAmelCase_ , metadata={ """help""": """Whether the csv file has training results or inference results. Defaults to inference results.""" } , ) lowerCamelCase__: Optional[str] = field( default=lowerCAmelCase_ , metadata={"""help""": """Filename under which the plot will be saved. 
If unused no plot is saved."""} , ) lowerCamelCase__: Optional[List[str]] = list_field( default=lowerCAmelCase_ , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} ) def _a ( __UpperCamelCase ): try: int(__UpperCamelCase ) return True except ValueError: return False def _a ( __UpperCamelCase ): try: float(__UpperCamelCase ) return True except ValueError: return False class a__ : def __init__( self : int , lowerCamelCase_ : int ): a_ : Union[str, Any] = args a_ : Optional[int] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: a_ : List[Any] = csv.DictReader(lowerCamelCase_ ) for row in reader: a_ : List[str] = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None a_ : Optional[Any] = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None a_ : Dict = float(row["""result"""] ) def UpperCAmelCase( self : Union[str, Any] ): a_ , a_ : Tuple = plt.subplots() a_ : List[Any] = """Time usage""" if self.args.is_time else """Memory usage""" a_ : Optional[int] = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): a_ : List[Any] = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) a_ : Union[str, Any] = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) a_ : List[str] = self.result_dict[model_name]["""result"""] ((a_) , (a_)) : List[Any] = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) 
a_ : str = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: a_ : Optional[int] = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=lowerCamelCase_ , ) else: a_ : int = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((a_) , (a_)) : List[str] = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) a_ : List[Any] = np.asarray(lowerCamelCase_ , lowerCamelCase_ )[: len(lowerCamelCase_ )] plt.scatter( lowerCamelCase_ , lowerCamelCase_ , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(lowerCamelCase_ , lowerCamelCase_ , """--""" ) title_str += F''' {label_model_name} vs.''' a_ : int = title_str[:-4] a_ : Tuple = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(lowerCamelCase_ ) plt.xlabel(lowerCamelCase_ ) plt.ylabel(lowerCamelCase_ ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def _a ( ): a_ : Tuple = HfArgumentParser(__UpperCamelCase ) a_ : Dict = parser.parse_args_into_dataclasses()[0] a_ : Dict = Plot(args=__UpperCamelCase ) plot.plot() if __name__ == "__main__": main()
478
1
def a ( A__ , A__ ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = len(A__ ) print('''The following activities are selected:''' ) # The first activity is always selected SCREAMING_SNAKE_CASE__ : List[str] = 0 print(A__ , end=''',''' ) # Consider rest of the activities for j in range(A__ ): # If this activity has start time greater than # or equal to the finish time of previously # selected activity, then select it if start[j] >= finish[i]: print(A__ , end=''',''' ) SCREAMING_SNAKE_CASE__ : int = j if __name__ == "__main__": import doctest doctest.testmod() a_ :Dict = [1, 3, 0, 5, 8, 5] a_ :int = [2, 4, 6, 7, 9, 9] print_max_activities(start, finish)
35
'''simple docstring''' lowerCAmelCase = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)] def __A ( a_ : int ): lowerCAmelCase : Optional[int] = 0 while number: # Increased Speed Slightly by checking every 5 digits together. sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0] number //= 1_0_0_0_0_0 return sum_of_digits_squared # There are 2 Chains made, # One ends with 89 with the chain member 58 being the one which when declared first, # there will be the least number of iterations for all the members to be checked. # The other one ends with 1 and has only one element 1. # So 58 and 1 are chosen to be declared at the starting. # Changed dictionary to an array to quicken the solution lowerCAmelCase = [None] * 10_00_00_00 lowerCAmelCase = True lowerCAmelCase = False def __A ( a_ : int ): if CHAINS[number - 1] is not None: return CHAINS[number - 1] # type: ignore lowerCAmelCase : Dict = chain(next_number(a_ ) ) lowerCAmelCase : Union[str, Any] = number_chain while number < 1_0_0_0_0_0_0_0: lowerCAmelCase : Any = number_chain number *= 1_0 return number_chain def __A ( a_ : int = 1_0_0_0_0_0_0_0 ): for i in range(1 ,a_ ): if CHAINS[i] is None: chain(i + 1 ) return CHAINS[:number].count(a_ ) if __name__ == "__main__": import doctest doctest.testmod() print(F'''{solution() = }''')
525
0
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ): '''simple docstring''' UpperCAmelCase_ : List[str] = len(_lowercase ), len(grid[0] ) if ( min(_lowercase , _lowercase ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) UpperCAmelCase_ : Optional[int] = 0 count += depth_first_search(_lowercase , row + 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , row - 1 , _lowercase , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col + 1 , _lowercase ) count += depth_first_search(_lowercase , _lowercase , col - 1 , _lowercase ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
701
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): __a = yaml.safe_load( '\\nname: ""\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: "Dataset Card for X" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: "Table of Contents"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Dataset Description"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: "Dataset Summary"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: "Supported Tasks and Leaderboards"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n' ) __a = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } __a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome 
text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __a = { 'name': 'root', 'text': '', 'is_empty_text': True, 'subsections': [ { 'name': 'Dataset Card for My Dataset', 'text': '', 'is_empty_text': True, 'subsections': [ {'name': 'Table of Contents', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': []}, { 'name': 'Dataset Description', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Dataset Summary', 'text': 'Some text here.', 'is_empty_text': False, 'subsections': [ { 'name': 'Extra Ignored Subsection', 'text': '', 'is_empty_text': True, 'subsections': [], } ], }, { 'name': 'Supported Tasks and Leaderboards', 'text': '', 'is_empty_text': True, 'subsections': [], }, {'name': 'Languages', 'text': 'Language Text', 'is_empty_text': False, 'subsections': []}, ], }, ], } ], } __a = '\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __a = ( 'The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.' ) __a = '\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __a = ( 'The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.' ) __a = '\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __a = 'The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.' 
__a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __a = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).' __a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n' __a = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.' __a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n' __a = 'The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.' __a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n' __a = 'The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.' __a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __a = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. 
Skipping further validation for this README.' __a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n' __a = 'The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.' __a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __a = 'The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.' __a = '' __a = 'The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.' __a = '\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n' __a = 'The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.' 
@pytest.mark.parametrize( '''readme_md, expected_dict''' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' assert ReadMe.from_string(_lowercase , _lowercase ).to_dict() == expected_dict @pytest.mark.parametrize( '''readme_md, expected_error''' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' with pytest.raises(_lowercase , match=re.escape(expected_error.format(path='''root''' ) ) ): UpperCAmelCase_ : Union[str, Any] = ReadMe.from_string(_lowercase , _lowercase ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' with pytest.raises(_lowercase , match=re.escape(expected_error.format(path='''root''' ) ) ): ReadMe.from_string(_lowercase , _lowercase ) @pytest.mark.parametrize( '''readme_md,''' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' ReadMe.from_string(_lowercase , _lowercase , suppress_parsing_errors=_lowercase ) @pytest.mark.parametrize( '''readme_md, expected_dict''' , [ 
(README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Dict = Path(_lowercase ) / '''README.md''' with open(_lowercase , '''w+''' ) as readme_file: readme_file.write(_lowercase ) UpperCAmelCase_ : Optional[int] = ReadMe.from_readme(_lowercase , _lowercase ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( '''readme_md, expected_error''' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Optional[int] = Path(_lowercase ) / '''README.md''' with open(_lowercase , '''w+''' ) as readme_file: readme_file.write(_lowercase ) UpperCAmelCase_ : List[Any] = expected_error.format(path=_lowercase ) with pytest.raises(_lowercase , match=re.escape(_lowercase ) ): UpperCAmelCase_ : Any = ReadMe.from_readme(_lowercase , _lowercase ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def 
lowerCamelCase__ ( _lowercase , _lowercase ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : List[Any] = Path(_lowercase ) / '''README.md''' with open(_lowercase , '''w+''' ) as readme_file: readme_file.write(_lowercase ) UpperCAmelCase_ : List[str] = expected_error.format(path=_lowercase ) with pytest.raises(_lowercase , match=re.escape(_lowercase ) ): ReadMe.from_readme(_lowercase , _lowercase ) @pytest.mark.parametrize( '''readme_md,''' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def lowerCamelCase__ ( _lowercase ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_dir: UpperCAmelCase_ : Dict = Path(_lowercase ) / '''README.md''' with open(_lowercase , '''w+''' ) as readme_file: readme_file.write(_lowercase ) ReadMe.from_readme(_lowercase , _lowercase , suppress_parsing_errors=_lowercase )
300
0
import gc import math import unittest import torch from diffusers import UNetaDModel from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin __UpperCAmelCase = logging.get_logger(__name__) enable_full_determinism() class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : int = UNetaDModel UpperCAmelCase__ : List[Any] = "sample" @property def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : List[Any] = 4 UpperCamelCase : Any = 3 UpperCamelCase : List[Any] = (32, 32) UpperCamelCase : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = torch.tensor([10] ).to(SCREAMING_SNAKE_CASE_ ) return {"sample": noise, "timestep": time_step} @property def snake_case_ ( self ) -> List[str]: return (3, 32, 32) @property def snake_case_ ( self ) -> str: return (3, 32, 32) def snake_case_ ( self ) -> Any: UpperCamelCase : Dict = { 'block_out_channels': (32, 64), 'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'), 'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'), 'attention_head_dim': 3, 'out_channels': 3, 'in_channels': 3, 'layers_per_block': 2, 'sample_size': 32, } UpperCamelCase : Tuple = self.dummy_input return init_dict, inputs_dict class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : Optional[int] = UNetaDModel UpperCAmelCase__ : Optional[int] = "sample" @property def snake_case_ ( self ) -> Any: UpperCamelCase : List[str] = 4 UpperCamelCase : Tuple = 4 UpperCamelCase : Tuple = (32, 32) UpperCamelCase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = torch.tensor([10] ).to(SCREAMING_SNAKE_CASE_ ) return {"sample": noise, "timestep": time_step} @property def snake_case_ ( self ) -> Optional[int]: return (4, 32, 32) @property 
def snake_case_ ( self ) -> Union[str, Any]: return (4, 32, 32) def snake_case_ ( self ) -> str: UpperCamelCase : Union[str, Any] = { 'sample_size': 32, 'in_channels': 4, 'out_channels': 4, 'layers_per_block': 2, 'block_out_channels': (32, 64), 'attention_head_dim': 32, 'down_block_types': ('DownBlock2D', 'DownBlock2D'), 'up_block_types': ('UpBlock2D', 'UpBlock2D'), } UpperCamelCase : Optional[Any] = self.dummy_input return init_dict, inputs_dict def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase , UpperCamelCase : Any = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertEqual(len(loading_info['missing_keys'] ), 0 ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[int] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU' ) def snake_case_ ( self ) -> Any: UpperCamelCase , UpperCamelCase : int = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = model(**self.dummy_input ).sample assert image is not None, "Make sure output is not None" @unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU' ) def snake_case_ ( self ) -> str: # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` UpperCamelCase , UpperCamelCase : Optional[int] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=SCREAMING_SNAKE_CASE_ ) model_accelerate.to(SCREAMING_SNAKE_CASE_ ) model_accelerate.eval() UpperCamelCase : str = torch.randn( 1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0 ), ) UpperCamelCase : Union[str, Any] = noise.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : 
Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = model_accelerate(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )['sample'] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() UpperCamelCase , UpperCamelCase : Optional[Any] = UNetaDModel.from_pretrained( 'fusing/unet-ldm-dummy-update', output_loading_info=SCREAMING_SNAKE_CASE_, low_cpu_mem_usage=SCREAMING_SNAKE_CASE_ ) model_normal_load.to(SCREAMING_SNAKE_CASE_ ) model_normal_load.eval() UpperCamelCase : Tuple = model_normal_load(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )['sample'] assert torch_all_close(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, rtol=1e-3 ) def snake_case_ ( self ) -> List[str]: UpperCamelCase : Union[str, Any] = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' ) model.eval() model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = torch.randn( 1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0 ), ) UpperCamelCase : Optional[Any] = noise.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = torch.tensor([10] * noise.shape[0] ).to(SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase : int = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off UpperCamelCase : Tuple = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] ) # fmt: on self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, rtol=1e-3 ) ) class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : str = UNetaDModel UpperCAmelCase__ : Optional[Any] = "sample" @property def snake_case_ ( self, SCREAMING_SNAKE_CASE_=(32, 32) ) -> Any: UpperCamelCase : List[Any] = 4 UpperCamelCase : str = 3 UpperCamelCase : Tuple = floats_tensor((batch_size, 
num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa, device=SCREAMING_SNAKE_CASE_ ) return {"sample": noise, "timestep": time_step} @property def snake_case_ ( self ) -> str: return (3, 32, 32) @property def snake_case_ ( self ) -> Optional[Any]: return (3, 32, 32) def snake_case_ ( self ) -> Tuple: UpperCamelCase : List[str] = { 'block_out_channels': [32, 64, 64, 64], 'in_channels': 3, 'layers_per_block': 1, 'out_channels': 3, 'time_embedding_type': 'fourier', 'norm_eps': 1e-6, 'mid_block_scale_factor': math.sqrt(2.0 ), 'norm_num_groups': None, 'down_block_types': [ 'SkipDownBlock2D', 'AttnSkipDownBlock2D', 'SkipDownBlock2D', 'SkipDownBlock2D', ], 'up_block_types': [ 'SkipUpBlock2D', 'SkipUpBlock2D', 'AttnSkipUpBlock2D', 'SkipUpBlock2D', ], } UpperCamelCase : Union[str, Any] = self.dummy_input return init_dict, inputs_dict @slow def snake_case_ ( self ) -> Any: UpperCamelCase , UpperCamelCase : Tuple = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256', output_loading_info=SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) self.assertEqual(len(loading_info['missing_keys'] ), 0 ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = self.dummy_input UpperCamelCase : Optional[Any] = floats_tensor((4, 3) + (256, 256) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = noise UpperCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ) assert image is not None, "Make sure output is not None" @slow def snake_case_ ( self ) -> List[str]: UpperCamelCase : List[Any] = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = 4 UpperCamelCase : Any = 3 UpperCamelCase : Union[str, Any] = (256, 256) UpperCamelCase : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = torch.tensor(batch_size * [1e-4] 
).to(SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): UpperCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase : str = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCamelCase : Tuple = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] ) # fmt: on self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, rtol=1e-2 ) ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Tuple = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' ) model.to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = 4 UpperCamelCase : Dict = 3 UpperCamelCase : str = (32, 32) UpperCamelCase : int = torch.ones((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = torch.tensor(batch_size * [1e-4] ).to(SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): UpperCamelCase : Any = model(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ).sample UpperCamelCase : Union[str, Any] = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off UpperCamelCase : Tuple = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] ) # fmt: on self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, rtol=1e-2 ) ) def snake_case_ ( self ) -> Optional[int]: # not required for this model pass
40
import os import tempfile import unittest import uuid from pathlib import Path from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available if is_torch_available(): import torch if is_soundfile_availble(): import soundfile as sf if is_vision_available(): from PIL import Image def UpperCamelCase ( snake_case__ : Tuple="" ) -> str: UpperCamelCase : Union[str, Any] = tempfile.mkdtemp() return os.path.join(snake_case__ , str(uuid.uuida() ) + suffix ) @require_soundfile @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> int: UpperCamelCase : Union[str, Any] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) del agent_type # Ensure the path remains even after the object deletion self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) # Ensure that the file contains the same value as the original tensor UpperCamelCase , UpperCamelCase : Any = sf.read(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, torch.tensor(SCREAMING_SNAKE_CASE_ ), atol=1e-4 ) ) def snake_case_ ( self ) -> Any: UpperCamelCase : Optional[int] = torch.rand(12, dtype=torch.floataa ) - 0.5 UpperCamelCase : Union[str, Any] = get_new_path(suffix='.wav' ) sf.write(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, 1_6000 ) UpperCamelCase : int = AgentAudio(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type.to_raw(), atol=1e-4 ) ) self.assertEqual(agent_type.to_string(), SCREAMING_SNAKE_CASE_ ) @require_vision @require_torch class 
lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Any: UpperCamelCase : Dict = torch.randint(0, 256, (64, 64, 3) ) UpperCamelCase : Union[str, Any] = AgentImage(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = str(agent_type.to_string() ) # Ensure that the tensor and the agent_type's tensor are the same self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_, agent_type._tensor, atol=1e-4 ) ) self.assertIsInstance(agent_type.to_raw(), Image.Image ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> Optional[int]: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Optional[int] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Any = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertTrue(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) def snake_case_ ( self ) -> int: UpperCamelCase : Optional[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' UpperCamelCase : Union[str, Any] = Image.open(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Dict = AgentImage(SCREAMING_SNAKE_CASE_ ) self.assertFalse(path.samefile(agent_type.to_string() ) ) self.assertTrue(image == agent_type.to_raw() ) # Ensure the path remains even after the object deletion del agent_type self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) ) class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> Optional[Any]: UpperCamelCase : Any = 'Hey!' UpperCamelCase : Dict = AgentText(SCREAMING_SNAKE_CASE_ ) self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_string() ) self.assertEqual(SCREAMING_SNAKE_CASE_, agent_type.to_raw() ) self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
40
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowercase : Union[str, Any] = logging.get_logger(__name__) if is_vision_available(): import PIL class A__ ( __UpperCAmelCase ): """simple docstring""" __A : str = ['''pixel_values'''] def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None: '''simple docstring''' super().__init__(**lowercase) a__ : Union[str, Any] = size if size is not None else {'shortest_edge': 224} a__ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase) a__ : Tuple = crop_size if crop_size is not None else {'height': 224, 'width': 224} a__ : Union[str, Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name='crop_size') a__ : Optional[int] = do_resize a__ : List[str] = size a__ : Optional[int] = resample a__ : Union[str, Any] = do_center_crop a__ : Dict = crop_size a__ : List[Any] = do_rescale a__ : Tuple = rescale_factor a__ : int = do_normalize a__ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN a__ : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD a__ : Optional[int] = do_convert_rgb def __lowercase ( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' a__ : str = 
get_size_dict(lowercase , default_to_square=lowercase) if "shortest_edge" not in size: raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}') a__ : int = get_resize_output_image_size(lowercase , size=size['shortest_edge'] , default_to_square=lowercase) return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase) def __lowercase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' a__ : str = get_size_dict(lowercase) if "height" not in size or "width" not in size: raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}') return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase) def __lowercase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> str: '''simple docstring''' return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase) def __lowercase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray: '''simple docstring''' return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase) def __lowercase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image: '''simple docstring''' a__ : Dict = do_resize if do_resize is not None else self.do_resize a__ : Union[str, Any] = size if size is not None else self.size a__ : str = get_size_dict(lowercase , param_name='size' , default_to_square=lowercase) a__ : Union[str, Any] = resample if resample is not None else self.resample a__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop a__ : Optional[int] 
= crop_size if crop_size is not None else self.crop_size a__ : Any = get_size_dict(lowercase , param_name='crop_size' , default_to_square=lowercase) a__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale a__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor a__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize a__ : int = image_mean if image_mean is not None else self.image_mean a__ : Dict = image_std if image_std is not None else self.image_std a__ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb a__ : List[Any] = make_list_of_images(lowercase) if not valid_images(lowercase): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # PIL RGBA images are converted to RGB if do_convert_rgb: a__ : Any = [convert_to_rgb(lowercase) for image in images] # All transformations expect numpy arrays. 
a__ : Union[str, Any] = [to_numpy_array(lowercase) for image in images] if do_resize: a__ : Union[str, Any] = [self.resize(image=lowercase , size=lowercase , resample=lowercase) for image in images] if do_center_crop: a__ : Union[str, Any] = [self.center_crop(image=lowercase , size=lowercase) for image in images] if do_rescale: a__ : str = [self.rescale(image=lowercase , scale=lowercase) for image in images] if do_normalize: a__ : Any = [self.normalize(image=lowercase , mean=lowercase , std=lowercase) for image in images] a__ : Tuple = [to_channel_dimension_format(lowercase , lowercase) for image in images] a__ : Union[str, Any] = {'pixel_values': images} return BatchFeature(data=lowercase , tensor_type=lowercase)
715
import collections import os import re from pathlib import Path lowercase : int = """src/transformers""" # Matches is_xxx_available() lowercase : List[str] = re.compile(r"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} lowercase : str = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowercase : List[str] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available lowercase : List[Any] = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") lowercase : Optional[int] = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowercase : Optional[Any] = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", lowercase : List[Any] = re.compile(r"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], lowercase : Tuple = re.compile(r"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo lowercase : str = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: lowercase : int = re.compile(r"""^\s*try:""") # Catches a line with else: lowercase : List[str] = re.compile(r"""^\s*else:""") def A_ ( A__ ) -> Optional[int]: if _re_test_backend.search(A__ ) is None: return None a__ : Optional[Any] = [b[0] for b in _re_backend.findall(A__ )] backends.sort() return "_and_".join(A__ ) def A_ ( A__ ) -> str: with open(A__ , 'r' , encoding='utf-8' , newline='\n' ) as f: a__ : Optional[Any] = f.readlines() a__ : Optional[Any] = 0 while line_index < len(A__ ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. 
if line_index >= len(A__ ): return None # First grab the objects without a specific backend in _import_structure a__ : int = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: a__ : Optional[int] = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A__ ): a__ : Optional[int] = _re_one_line_import_struct.search(A__ ).groups()[0] a__ : List[str] = re.findall(R'\[([^\]]+)\]' , A__ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue a__ : int = _re_import_struct_key_value.search(A__ ) if single_line_import_search is not None: a__ : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(A__ ) > 0] objects.extend(A__ ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 a__ : Any = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
a__ : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: a__ : List[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 a__ : Any = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): a__ : Any = lines[line_index] if _re_import_struct_add_one.search(A__ ) is not None: objects.append(_re_import_struct_add_one.search(A__ ).groups()[0] ) elif _re_import_struct_add_many.search(A__ ) is not None: a__ : int = _re_import_struct_add_many.search(A__ ).groups()[0].split(', ' ) a__ : List[Any] = [obj[1:-1] for obj in imports if len(A__ ) > 0] objects.extend(A__ ) elif _re_between_brackets.search(A__ ) is not None: a__ : List[str] = _re_between_brackets.search(A__ ).groups()[0].split(', ' ) a__ : int = [obj[1:-1] for obj in imports if len(A__ ) > 0] objects.extend(A__ ) elif _re_quote_object.search(A__ ) is not None: objects.append(_re_quote_object.search(A__ ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 a__ : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend a__ : Union[str, Any] = [] while ( line_index < len(A__ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): a__ : List[Any] = lines[line_index] a__ : List[str] = _re_import.search(A__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 a__ : Dict = {'none': objects} # Let's continue with backend-specific objects while 
line_index < len(A__ ): # If the line is an if is_backend_available, we grab all objects associated. a__ : Any = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: a__ : str = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 a__ : List[str] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): a__ : List[str] = lines[line_index] a__ : Union[str, Any] = _re_import.search(A__ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 a__ : int = objects else: line_index += 1 return import_dict_objects, type_hint_objects def A_ ( A__ , A__ ) -> Dict: def find_duplicates(A__ ): return [k for k, v in collections.Counter(A__ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] a__ : Union[str, Any] = [] for key in import_dict_objects.keys(): a__ : Union[str, Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' ) a__ : Optional[int] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): a__ : str = 'base imports' if key == 'none' else F'{key} backend' errors.append(F'Differences for {name}:' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F' {a} in TYPE_HINT but not in _import_structure.' 
) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F' {a} in _import_structure but not in TYPE_HINT.' ) return errors def A_ ( ) -> List[Any]: a__ : Tuple = [] for root, _, files in os.walk(A__ ): if "__init__.py" in files: a__ : Tuple = os.path.join(A__ , '__init__.py' ) a__ : Optional[int] = parse_init(A__ ) if objects is not None: a__ : List[Any] = analyze_results(*A__ ) if len(A__ ) > 0: a__ : List[Any] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}' failures.append('\n'.join(A__ ) ) if len(A__ ) > 0: raise ValueError('\n\n'.join(A__ ) ) def A_ ( ) -> List[Any]: a__ : List[str] = [] for path, directories, files in os.walk(A__ ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(A__ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A__ ) / folder).glob('*.py' ) ) ) == 0: continue a__ : List[str] = str((Path(A__ ) / folder).relative_to(A__ ) ) a__ : List[Any] = short_path.replace(os.path.sep , '.' ) submodules.append(A__ ) for fname in files: if fname == "__init__.py": continue a__ : str = str((Path(A__ ) / fname).relative_to(A__ ) ) a__ : Any = short_path.replace('.py' , '' ).replace(os.path.sep , '.' ) if len(submodule.split('.' ) ) == 1: submodules.append(A__ ) return submodules lowercase : Union[str, Any] = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def A_ ( ) -> Dict: # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import a__ : List[Any] = direct_transformers_import(A__ ) a__ : Optional[int] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. 
Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(A__ , '__init__.py' ) , 'r' ) as f: a__ : Optional[Any] = f.read() import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , A__ ) ) ) a__ : List[str] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(A__ ) > 0: a__ : Optional[int] = '\n'.join(F'- {module}' for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registed in the main init of Transformers:\n' F'{list_of_modules}\n' 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
392
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Optional[int] = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ): lowercase__ = "sew-d" def __init__( self : List[str] , lowerCAmelCase_ : Tuple=3_2 , lowerCAmelCase_ : int=7_6_8 , lowerCAmelCase_ : List[str]=1_2 , lowerCAmelCase_ : List[str]=1_2 , lowerCAmelCase_ : str=3_0_7_2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : Optional[int]=5_1_2 , lowerCAmelCase_ : int=2_5_6 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[Any]=("p2c", "c2p") , lowerCAmelCase_ : List[Any]="layer_norm" , lowerCAmelCase_ : List[str]="gelu_python" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Dict=0.02 , lowerCAmelCase_ : List[Any]=1E-7 , lowerCAmelCase_ : str=1E-5 , lowerCAmelCase_ : List[str]="group" , lowerCAmelCase_ : Any="gelu" , lowerCAmelCase_ : str=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , lowerCAmelCase_ : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase_ : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : str=1_2_8 , lowerCAmelCase_ : Optional[Any]=1_6 , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : str=0.05 , lowerCAmelCase_ : str=1_0 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : str=1_0 , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : Optional[int]="mean" , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : List[str]=2_5_6 , lowerCAmelCase_ 
: Tuple=0 , lowerCAmelCase_ : Union[str, Any]=1 , lowerCAmelCase_ : Optional[int]=2 , **lowerCAmelCase_ : Optional[int] , ): """simple docstring""" super().__init__(**_lowerCAmelCase , pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase) lowercase_ = hidden_size lowercase_ = feat_extract_norm lowercase_ = feat_extract_activation lowercase_ = list(_lowerCAmelCase) lowercase_ = list(_lowerCAmelCase) lowercase_ = list(_lowerCAmelCase) lowercase_ = conv_bias lowercase_ = num_conv_pos_embeddings lowercase_ = num_conv_pos_embedding_groups lowercase_ = len(self.conv_dim) lowercase_ = num_hidden_layers lowercase_ = intermediate_size lowercase_ = squeeze_factor lowercase_ = max_position_embeddings lowercase_ = position_buckets lowercase_ = share_att_key lowercase_ = relative_attention lowercase_ = norm_rel_ebd lowercase_ = list(_lowerCAmelCase) lowercase_ = hidden_act lowercase_ = num_attention_heads lowercase_ = hidden_dropout lowercase_ = attention_dropout lowercase_ = activation_dropout lowercase_ = feat_proj_dropout lowercase_ = final_dropout lowercase_ = layer_norm_eps lowercase_ = feature_layer_norm_eps lowercase_ = initializer_range lowercase_ = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase_ = apply_spec_augment lowercase_ = mask_time_prob lowercase_ = mask_time_length lowercase_ = mask_time_min_masks lowercase_ = mask_feature_prob lowercase_ = 
mask_feature_length lowercase_ = mask_feature_min_masks # ctc loss lowercase_ = ctc_loss_reduction lowercase_ = ctc_zero_infinity # sequence classification lowercase_ = use_weighted_layer_sum lowercase_ = classifier_proj_size @property def _UpperCAmelCase ( self : Tuple): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
567
'''simple docstring''' from torch import nn def _A ( _lowerCAmelCase ): """simple docstring""" if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(f"""Unsupported activation function: {act_fn}""" )
474
0
"""Max pooling and average pooling over square matrices, with an image demo."""
import numpy as np


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D max pooling to a square matrix.

    Args:
        arr: Square input matrix (anything ``np.array`` accepts).
        size: Side length of the (square) pooling window.
        stride: Step, in pixels, between consecutive windows.

    Returns:
        np.ndarray: The pooled matrix of shape
        ``((n - size) // stride + 1,) * 2`` where ``n`` is the input side.

    Raises:
        ValueError: If the input matrix is not square.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    # Shape of the output matrix.
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    i = 0
    mat_i = 0
    while i < arr.shape[0]:
        # Stop once a full window no longer fits vertically.
        if i + size > arr.shape[0]:
            break
        j = 0
        mat_j = 0
        while j < arr.shape[1]:
            # Stop once a full window no longer fits horizontally.
            if j + size > arr.shape[1]:
                break
            # Fix: results were previously assigned to a throwaway local
            # instead of being written into the output matrix.
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            j += stride
            mat_j += 1
        i += stride
        mat_i += 1
    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Apply 2D average pooling to a square matrix.

    Each output cell is ``int(np.average(window))`` (truncated toward zero),
    matching the original behavior.

    Args:
        arr: Square input matrix (anything ``np.array`` accepts).
        size: Side length of the (square) pooling window.
        stride: Step, in pixels, between consecutive windows.

    Returns:
        np.ndarray: The pooled matrix.

    Raises:
        ValueError: If the input matrix is not square.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    # Shape of the output matrix.
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    i = 0
    mat_i = 0
    while i < arr.shape[0]:
        # Stop once a full window no longer fits vertically.
        if i + size > arr.shape[0]:
            break
        j = 0
        mat_j = 0
        while j < arr.shape[1]:
            # Stop once a full window no longer fits horizontally.
            if j + size > arr.shape[1]:
                break
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            j += stride
            mat_j += 1
        i += stride
        mat_i += 1
    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='avgpooling', verbose=True)

    # PIL is only needed for the demo below, so import it lazily here.
    from PIL import Image

    # Loading the image (fix: the opened image was previously bound to a
    # throwaway name while `image` was referenced below).
    image = Image.open('path_to_image')

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
707
"""Convert a fairseq Speech2Text checkpoint (.pt) into a Transformers checkpoint."""
import argparse

import torch
from torch import nn

from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no Transformers counterpart (in place)."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    """Rename fairseq parameter names to the Transformers layout (in place)."""
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer sharing the weights of an embedding (for tied lm_head)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_sat_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Load a fairseq checkpoint, map it onto a Transformers model, and save it.

    Args:
        checkpoint_path: path to the fairseq `.pt` file.
        pytorch_dump_folder_path: output directory for the converted model.

    Raises:
        ValueError: if unexpected weights (beyond positional embeddings) are missing.
    """
    mam_aaa = torch.load(checkpoint_path, map_location="cpu")
    args = mam_aaa["args"]
    state_dict = mam_aaa["model"]
    # Keep the output projection aside: it is dropped by remove_ignore_keys_ but may
    # be needed for the lm_head when embeddings are not tied.
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = SpeechaTextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = SpeechaTextForConditionalGeneration(config)
    # strict=False: sinusoidal positional embeddings are recomputed, not loaded.
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
110
0
"""Tokenization classes for GPT-NeoX-Japanese."""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple

import numpy as np

from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2_048,
}


def load_vocab_and_emoji(vocab_file, emoji_file):
    """Load a vocabulary file and an emoji file into lookup dictionaries.

    Each vocab line is either a single token or a comma-separated group of
    surface forms that share one id.

    Returns:
        (vocab, raw_vocab, ids_to_tokens, emoji)
    """
    with open(emoji_file, "r", encoding="utf-8") as f:
        emoji = json.loads(f.read())

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as f:
        token = f.readlines()
    token = [[t.rstrip("\n")] if (t == "," or "," not in t) else t.rstrip("\n").split(",") for t in token]
    for idx, b in enumerate(token):
        ids_to_tokens[idx] = b
        raw_vocab[b[0]] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    """Tokenizer for GPT-NeoX-Japanese, backed by a sub-word Japanese tokenizer
    with special handling of emoji, whitespace markers and raw bytes."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            pad_token=pad_token,
            bos_token=bos_token,
            eos_token=eos_token,
            do_clean_text=do_clean_text,
            **kwargs,
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji
        )

    @property
    def vocab_size(self):
        # raw_vocab holds one entry per id (surface-form groups are collapsed).
        return len(self.raw_vocab)

    def get_vocab(self):
        return dict(self.raw_vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) back to a token (str)."""
        return self.subword_tokenizer.convert_id_to_token(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens into a single string."""
        out_string = "".join(tokens).strip()
        return out_string

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Encode a Conversation into ids, truncating from the left to model_max_length."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocab and emoji files to `save_directory`, returning their paths."""
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"]
            )
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file


class SubWordJapaneseTokenizer(object):
    """Greedy longest-match sub-word tokenizer for Japanese text with emoji,
    whitespace markers and byte-level fallback."""

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        # Longest surface form in the vocab: bounds the greedy match window.
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        # Content normalization patterns (URL, e-mail, phone, dates, price).
        self.content_repattera = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatterb = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatterc = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatterd = re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repattere = re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*"
        )
        self.content_repatterf = re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*"
        )
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        # Box-drawing and block-element characters are collapsed to one marker.
        self.content_transa = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__(self):
        return len(self.ids_to_tokens)

    def clean_text(self, content):
        """Replace URLs, e-mails, phone numbers, dates and prices with markers."""
        content = self.content_repattera.sub("<URL>", content)
        content = self.content_repatterb.sub("<EMAIL>", content)
        content = self.content_repatterc.sub("<TEL>", content)
        content = self.content_repatterd.sub("<DATE>", content)
        content = self.content_repattere.sub("<DATE>", content)
        content = self.content_repatterf.sub("<PRICE>", content)
        content = content.translate(self.content_transa)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content

    def tokenize(self, text, clean=False):
        """Tokenize `text` into vocab entries; unknown characters fall back to
        <KIGOU>/<U2000U2BFF> symbol markers or per-byte tokens."""
        text = text.replace(" ", "<SP>")
        text = text.replace(" ", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)

        def check_simbol(x):
            # True for single characters whose UTF-8 encoding is 2 bytes and
            # falls in known symbol ranges.
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xC2A1 and c <= 0xC2BF)
                    or (c >= 0xC780 and c <= 0xC783)
                    or (c >= 0xCAB9 and c <= 0xCBBF)
                    or (c >= 0xCC80 and c <= 0xCDA2)
                ):
                    return True
            return False

        def checkuae(x):
            # True for single characters in the U+2000..U+2BFF range
            # (3-byte UTF-8 encodings E2 80 80 .. E2 B0 7F).
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xE28080 and c <= 0xE2B07F:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text):
            # Special tokens start with "<" and may be long; plain text is
            # matched with a window of at most 3 characters.
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        """Map an id back to text, decoding byte tokens and marker tokens."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(words) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
172
"""Greatest common divisor, recursively and iteratively (Euclid's algorithm)."""


def greatest_common_divisor(a: int, b: int) -> int:
    """Return gcd(a, b) via recursive Euclid; always non-negative.

    >>> greatest_common_divisor(24, 40)
    8
    """
    # When a reaches 0, |b| is the gcd.
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Return gcd(x, y) via iterative Euclid; always non-negative.

    >>> gcd_by_iterative(24, 40)
    8
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Read two comma-separated integers from stdin and print both gcd results."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_a = int(nums[0])
        num_a2 = int(nums[1])
        print(f"greatest_common_divisor({num_a}, {num_a2}) = {greatest_common_divisor(num_a, num_a2)}")
        print(f"By iterative gcd({num_a}, {num_a2}) = {gcd_by_iterative(num_a, num_a2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
172
1
def _UpperCAmelCase ( A , A ): '''simple docstring''' _validate_point(A ) _validate_point(A ) if len(A ) != len(A ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(a - b ) for a, b in zip(A , A ) ) ) def _UpperCAmelCase ( A ): '''simple docstring''' if point: if isinstance(A , A ): for item in point: if not isinstance(A , (int, float) ): UpperCAmelCase__ =( "Expected a list of numbers as input, found " F"""{type(A ).__name__}""" ) raise TypeError(A ) else: UpperCAmelCase__ =F"""Expected a list of numbers as input, found {type(A ).__name__}""" raise TypeError(A ) else: raise ValueError("Missing an input" ) def _UpperCAmelCase ( A , A ): '''simple docstring''' _validate_point(A ) _validate_point(A ) if len(A ) != len(A ): raise ValueError("Both points must be in the same n-dimensional space" ) return float(sum(abs(x - y ) for x, y in zip(A , A ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
510
"""LUKE model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    """Configuration class for the LUKE model.

    Stores the vocabulary/entity-vocabulary sizes, transformer dimensions and
    regularization hyper-parameters; defaults reproduce studio-ousia/luke-base.
    """

    model_type = "luke"

    def __init__(
        self,
        vocab_size=5_0267,
        entity_vocab_size=50_0000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Construct a LukeConfig; unknown kwargs are forwarded to PretrainedConfig."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
510
1
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowercase__( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self :int ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : int = -1 SCREAMING_SNAKE_CASE : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE : Any = TextStreamer(lowerCamelCase_ ) model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE : List[str] = cs.out[:-1] self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def __lowerCAmelCase ( self :Any ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = -1 SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = 
model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(greedy_ids[0] ) SCREAMING_SNAKE_CASE : Dict = TextIteratorStreamer(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} SCREAMING_SNAKE_CASE : Union[str, Any] = Thread(target=model.generate , kwargs=lowerCamelCase_ ) thread.start() SCREAMING_SNAKE_CASE : List[Any] = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def __lowerCAmelCase ( self :Union[str, Any] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = -1 SCREAMING_SNAKE_CASE : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : str = model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Tuple = greedy_ids[:, input_ids.shape[1] :] SCREAMING_SNAKE_CASE : int = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: SCREAMING_SNAKE_CASE : List[str] = TextStreamer(lowerCamelCase_ , skip_prompt=lowerCamelCase_ ) model.generate(lowerCamelCase_ , max_new_tokens=10 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer SCREAMING_SNAKE_CASE : Union[str, Any] = cs.out[:-1] self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def __lowerCAmelCase ( self :Dict ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained('''distilgpt2''' ) SCREAMING_SNAKE_CASE : List[Any] = 
AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : Dict = -1 SCREAMING_SNAKE_CASE : Optional[int] = torch.ones((1, 5) , device=lowerCamelCase_ ).long() * model.config.bos_token_id with CaptureStdout() as cs: SCREAMING_SNAKE_CASE : Tuple = TextStreamer(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) model.generate(lowerCamelCase_ , max_new_tokens=1 , do_sample=lowerCamelCase_ , streamer=lowerCamelCase_ ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token SCREAMING_SNAKE_CASE : Dict = cs.out[:-1] # Remove the final "\n" SCREAMING_SNAKE_CASE : Tuple = tokenizer(lowerCamelCase_ , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __lowerCAmelCase ( self :Any ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[Any] = -1 SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE : List[str] = TextIteratorStreamer(lowerCamelCase_ , timeout=0.0_0_1 ) SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer} SCREAMING_SNAKE_CASE : Tuple = Thread(target=model.generate , kwargs=lowerCamelCase_ ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(lowerCamelCase_ ): SCREAMING_SNAKE_CASE : List[Any] = '''''' for new_text in streamer: streamer_text += new_text
698
"""simple docstring""" # using dfs for finding eulerian path traversal def __A ( a_ : Dict , a_ : int , a_ : str , a_ : Optional[Any]=None )-> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[Any] = (path or []) + [u] for v in graph[u]: if visited_edge[u][v] is False: SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = True, True SCREAMING_SNAKE_CASE : List[str] = dfs(a_ , a_ , a_ , a_ ) return path def __A ( a_ : List[str] , a_ : Any )-> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : int = 0 SCREAMING_SNAKE_CASE : str = -1 for i in range(a_ ): if i not in graph.keys(): continue if len(graph[i] ) % 2 == 1: odd_degree_nodes += 1 SCREAMING_SNAKE_CASE : Tuple = i if odd_degree_nodes == 0: return 1, odd_node if odd_degree_nodes == 2: return 2, odd_node return 3, odd_node def __A ( a_ : Any , a_ : int )-> Any: '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )] SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = check_circuit_or_path(a_ , a_ ) if check == 3: print('''graph is not Eulerian''' ) print('''no path''' ) return SCREAMING_SNAKE_CASE : Tuple = 1 if check == 2: SCREAMING_SNAKE_CASE : Optional[int] = odd_node print('''graph has a Euler path''' ) if check == 1: print('''graph has a Euler cycle''' ) SCREAMING_SNAKE_CASE : Optional[int] = dfs(a_ , a_ , a_ ) print(a_ ) def __A ( )-> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]} SCREAMING_SNAKE_CASE : str = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]} SCREAMING_SNAKE_CASE : str = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]} SCREAMING_SNAKE_CASE : int = {1: [2, 3], 2: [1, 3], 3: [1, 2]} SCREAMING_SNAKE_CASE : int = { 1: [], 2: [] # all degree is zero } SCREAMING_SNAKE_CASE : List[str] = 10 check_euler(a_ , a_ ) check_euler(a_ , a_ ) check_euler(a_ , a_ ) check_euler(a_ , a_ ) check_euler(a_ , a_ 
) if __name__ == "__main__": main()
698
1
"""Lazy-import structure for the EnCodec model (torch-only classes are optional)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Submodule -> public names mapping consumed by _LazyModule.
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}

# Modeling classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
517
"""CPU/GPU memory and time measurement helpers for benchmarking."""
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    """Samples this process's RSS in a background thread to record the peak."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        """Busy-loop sampling RSS until `stop()` clears `peak_monitoring`."""
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        """Start the sampling thread (daemonized so it never blocks exit)."""
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        """Stop sampling and return the peak RSS observed (bytes)."""
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    """Snapshot time, CPU RSS and per-GPU allocated memory; start peak tracking."""
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    """Return deltas (seconds; MiB) relative to a `start_measure` snapshot."""
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (deltas converted from bytes to MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    """Pretty-print a measurement dict produced by `end_measure`."""
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
517
1
"""simple docstring""" class lowerCAmelCase : '''simple docstring''' def __init__( self , lowerCAmelCase__ ) -> None: SCREAMING_SNAKE_CASE = set_counts SCREAMING_SNAKE_CASE = max(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = len(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = [1] * num_sets SCREAMING_SNAKE_CASE = list(range(lowerCAmelCase__ ) ) def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> bool: SCREAMING_SNAKE_CASE = self.get_parent(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = self.get_parent(lowerCAmelCase__ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 SCREAMING_SNAKE_CASE = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] SCREAMING_SNAKE_CASE = 0 SCREAMING_SNAKE_CASE = src_parent SCREAMING_SNAKE_CASE = self.set_counts[src_parent] SCREAMING_SNAKE_CASE = max(self.max_set , lowerCAmelCase__ ) return True def __A ( self , lowerCAmelCase__ ) -> int: if self.parents[disj_set] == disj_set: return disj_set SCREAMING_SNAKE_CASE = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
247
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __UpperCamelCase = '''platform''' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCAmelCase : '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = PegasusConfig SCREAMING_SNAKE_CASE_ : Optional[Any] = {} SCREAMING_SNAKE_CASE_ : Dict = """gelu""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=20 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , ) -> Tuple: SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = seq_length SCREAMING_SNAKE_CASE = is_training SCREAMING_SNAKE_CASE = use_labels SCREAMING_SNAKE_CASE = vocab_size SCREAMING_SNAKE_CASE = hidden_size SCREAMING_SNAKE_CASE = num_hidden_layers SCREAMING_SNAKE_CASE = num_attention_heads SCREAMING_SNAKE_CASE = intermediate_size SCREAMING_SNAKE_CASE = hidden_dropout_prob SCREAMING_SNAKE_CASE = attention_probs_dropout_prob SCREAMING_SNAKE_CASE = max_position_embeddings SCREAMING_SNAKE_CASE = eos_token_id SCREAMING_SNAKE_CASE = pad_token_id SCREAMING_SNAKE_CASE = bos_token_id def __A ( self ) -> Optional[int]: 
SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) SCREAMING_SNAKE_CASE = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) SCREAMING_SNAKE_CASE = np.concatenate([input_ids, eos_tensor] , axis=1 ) SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) SCREAMING_SNAKE_CASE = prepare_pegasus_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return config, inputs_dict def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]: SCREAMING_SNAKE_CASE = 20 SCREAMING_SNAKE_CASE = model_class_name(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' ) SCREAMING_SNAKE_CASE = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) SCREAMING_SNAKE_CASE = model.decode( decoder_input_ids[:, :-1] , lowerCAmelCase__ , 
decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) SCREAMING_SNAKE_CASE = model.decode( decoder_input_ids[:, -1:] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE = model.decode(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' ) def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]: SCREAMING_SNAKE_CASE = 20 SCREAMING_SNAKE_CASE = model_class_name(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'] ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) SCREAMING_SNAKE_CASE = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) SCREAMING_SNAKE_CASE = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) SCREAMING_SNAKE_CASE = model.decode( decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' ) SCREAMING_SNAKE_CASE = model.decode( decoder_input_ids[:, -1:] , lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase__ , 
decoder_position_ids=lowerCAmelCase__ , ) SCREAMING_SNAKE_CASE = model.decode(lowerCAmelCase__ , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' ) def lowercase (SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any]=None , SCREAMING_SNAKE_CASE_ : Tuple=None , ) -> Union[str, Any]: if attention_mask is None: SCREAMING_SNAKE_CASE = np.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: SCREAMING_SNAKE_CASE = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCAmelCase ( lowerCamelCase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) SCREAMING_SNAKE_CASE_ : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () SCREAMING_SNAKE_CASE_ : Tuple = True SCREAMING_SNAKE_CASE_ : List[Any] = False SCREAMING_SNAKE_CASE_ : Dict = False SCREAMING_SNAKE_CASE_ : List[Any] = False def __A ( self ) -> Union[str, Any]: SCREAMING_SNAKE_CASE = FlaxPegasusModelTester(self ) SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase__ ) def __A ( self ) -> Any: self.config_tester.run_common_tests() def __A ( self ) -> List[Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __A ( self ) -> str: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __A ( self ) -> List[Any]: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ ) @jax.jit def encode_jitted(lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ): return model.encode(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ) with self.subTest('JIT Enabled' ): SCREAMING_SNAKE_CASE = encode_jitted(**lowerCAmelCase__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): SCREAMING_SNAKE_CASE = encode_jitted(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) ) for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) def __A ( self ) -> Tuple: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] ) SCREAMING_SNAKE_CASE = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): return model.decode( 
decoder_input_ids=lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , encoder_outputs=lowerCAmelCase__ , ) with self.subTest('JIT Enabled' ): SCREAMING_SNAKE_CASE = decode_jitted(**lowerCAmelCase__ ).to_tuple() with self.subTest('JIT Disabled' ): with jax.disable_jit(): SCREAMING_SNAKE_CASE = decode_jitted(**lowerCAmelCase__ ).to_tuple() self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) ) for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __A ( self ) -> Union[str, Any]: for model_class_name in self.all_model_classes: SCREAMING_SNAKE_CASE = model_class_name.from_pretrained('google/pegasus-large' , from_pt=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = np.ones((1, 1) ) SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) @slow def __A ( self ) -> Any: SCREAMING_SNAKE_CASE = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' ) SCREAMING_SNAKE_CASE = PegasusTokenizer.from_pretrained('google/pegasus-xsum' ) SCREAMING_SNAKE_CASE = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] SCREAMING_SNAKE_CASE = [ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors='np' , truncation=lowerCAmelCase__ , max_length=512 , padding=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = model.generate(**lowerCAmelCase__ , num_beams=2 ).sequences SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) assert tgt_text == decoded
247
1
"""simple docstring""" import math lowercase = 10 lowercase = 7 lowercase = BALLS_PER_COLOUR * NUM_COLOURS def _lowerCAmelCase ( __lowerCamelCase:int = 2_0 ): '''simple docstring''' __magic_name__ = math.comb(__lowerCamelCase , __lowerCamelCase ) __magic_name__ = math.comb(NUM_BALLS - BALLS_PER_COLOUR , __lowerCamelCase ) __magic_name__ = NUM_COLOURS * (1 - missing_colour / total) return f'''{result:.9f}''' if __name__ == "__main__": print(solution(20))
706
"""simple docstring""" import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values lowercase = argparse.ArgumentParser() parser.add_argument('''--user''', type=str, default='''ubuntu''') parser.add_argument('''--host''', type=str, default='''localhost''') parser.add_argument('''--key_path''', type=str, default=None) parser.add_argument('''--instance''', type=str, default='''V100:1''') parser.add_argument('''--provider''', type=str, default='''cheapest''') parser.add_argument('''--use_spot''', type=bool, default=False) parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''') lowercase , lowercase = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('''Cannot specify both BYO and on-demand cluster args''') lowercase = rh.cluster( name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path} ) else: lowercase = rh.cluster( name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) lowercase = args.example.rsplit('''/''', 1)[0] # Set up remote environment cluster.install_packages(['''pip:./''']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f'''pip install -r transformers/examples/{example_dir}/requirements.txt''']) 
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117''']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}''']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
468
0
from collections import defaultdict from pathlib import Path import pandas as pd from rouge_cli import calculate_rouge_path from utils import calculate_rouge _lowerCAmelCase : Dict = [ '''Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the''' ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe''' ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''', '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal''' ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s''' ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the''' ''' body.''', '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of''' ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the''' ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital''' ''' punishment.''', ] _lowerCAmelCase : Dict = [ '''Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .''' ''' Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . 
Andreas Lubitz''' ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''', '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .''' ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against''' ''' Israelis .''', '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to''' ''' death . Organization claims that governments around the world are using the threat of terrorism to advance''' ''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death''' ''' sentences up by 28% .''', ] def __snake_case ( ) -> Any: A_ : List[Any] = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bootstrap_aggregation=_SCREAMING_SNAKE_CASE , rouge_keys=["rouge2", "rougeL"] ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) A_ : int = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bootstrap_aggregation=_SCREAMING_SNAKE_CASE , rouge_keys=["rouge2"] ) assert ( pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean() == pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean() ) def __snake_case ( ) -> Optional[Any]: A_ : Union[str, Any] = "rougeLsum" A_ : Union[str, Any] = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE , rouge_keys=[k] )[k] A_ : str = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE , rouge_keys=[k] )[k] assert score > score_no_sep def __snake_case ( ) -> Optional[Any]: A_ : str = ["rouge1", "rouge2", "rougeL"] A_ : List[str] = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE , rouge_keys=_SCREAMING_SNAKE_CASE ) A_ : Optional[Any] = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , 
newline_sep=_SCREAMING_SNAKE_CASE , rouge_keys=_SCREAMING_SNAKE_CASE ) assert score_sep == score_no_sep def __snake_case ( ) -> Tuple: A_ : List[str] = [ "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.", "Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .", ] A_ : str = [ "Margot Frank, died in 1945, a month earlier than previously thought.", "Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of" " the final seconds on board Flight 9525.", ] assert calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE ) == calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , newline_sep=_SCREAMING_SNAKE_CASE ) def __snake_case ( ) -> Tuple: A_ : List[str] = [ "\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" " ] A_ : Optional[Any] = [ " Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ." 
] A_ : str = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rouge_keys=["rougeLsum"] , newline_sep=_SCREAMING_SNAKE_CASE )["rougeLsum"] A_ : Optional[Any] = calculate_rouge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , rouge_keys=["rougeLsum"] )["rougeLsum"] assert new_score > prev_score def __snake_case ( ) -> int: A_ : Tuple = Path("examples/seq2seq/test_data/wmt_en_ro" ) A_ : Any = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) A_ : List[str] = calculate_rouge_path( data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=_SCREAMING_SNAKE_CASE ) assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
454
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase__ = { """configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""], """tokenization_mvp""": ["""MvpTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""MvpTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """MVP_PRETRAINED_MODEL_ARCHIVE_LIST""", """MvpForCausalLM""", """MvpForConditionalGeneration""", """MvpForQuestionAnswering""", """MvpForSequenceClassification""", """MvpModel""", """MvpPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig from .tokenization_mvp import MvpTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mvp_fast import MvpTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mvp import ( MVP_PRETRAINED_MODEL_ARCHIVE_LIST, MvpForCausalLM, MvpForConditionalGeneration, MvpForQuestionAnswering, MvpForSequenceClassification, MvpModel, MvpPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
225
0
'''simple docstring''' class lowerCamelCase : def __init__( self ) -> Tuple: """simple docstring""" _snake_case : Union[str, Any] = 0 _snake_case : Any = 0 _snake_case : int = {} def UpperCAmelCase_ ( self , lowercase__ ) -> Tuple: """simple docstring""" if vertex not in self.adjacency: _snake_case : Tuple = {} self.num_vertices += 1 def UpperCAmelCase_ ( self , lowercase__ , lowercase__ , lowercase__ ) -> List[Any]: """simple docstring""" self.add_vertex(UpperCAmelCase_ ) self.add_vertex(UpperCAmelCase_ ) if head == tail: return _snake_case : Tuple = weight _snake_case : Optional[int] = weight def UpperCAmelCase_ ( self ) -> str: """simple docstring""" _snake_case : List[Any] = self.get_edges() for edge in edges: _snake_case : str = edge edges.remove((tail, head, weight) ) for i in range(len(UpperCAmelCase_ ) ): _snake_case : Any = list(edges[i] ) edges.sort(key=lambda lowercase__ : e[2] ) for i in range(len(UpperCAmelCase_ ) - 1 ): if edges[i][2] >= edges[i + 1][2]: _snake_case : Optional[int] = edges[i][2] + 1 for edge in edges: _snake_case : List[Any] = edge _snake_case : Dict = weight _snake_case : List[Any] = weight def __str__( self ) -> List[str]: """simple docstring""" _snake_case : List[Any] = '' for tail in self.adjacency: for head in self.adjacency[tail]: _snake_case : Union[str, Any] = self.adjacency[head][tail] string += F'''{head} -> {tail} == {weight}\n''' return string.rstrip('''\n''' ) def UpperCAmelCase_ ( self ) -> Dict: """simple docstring""" _snake_case : Optional[Any] = [] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def UpperCAmelCase_ ( self ) -> int: """simple docstring""" return self.adjacency.keys() @staticmethod def UpperCAmelCase_ ( lowercase__=None , lowercase__=None ) -> List[str]: """simple docstring""" _snake_case : Tuple = Graph() if vertices is None: _snake_case : Tuple = [] if edges is None: _snake_case : List[str] = [] for vertex in 
vertices: g.add_vertex(UpperCAmelCase_ ) for edge in edges: g.add_edge(*UpperCAmelCase_ ) return g class lowerCamelCase : def __init__( self ) -> Optional[Any]: """simple docstring""" _snake_case : int = {} _snake_case : List[str] = {} def __len__( self ) -> Union[str, Any]: """simple docstring""" return len(self.parent ) def UpperCAmelCase_ ( self , lowercase__ ) -> Union[str, Any]: """simple docstring""" if item in self.parent: return self.find(UpperCAmelCase_ ) _snake_case : List[Any] = item _snake_case : Dict = 0 return item def UpperCAmelCase_ ( self , lowercase__ ) -> str: """simple docstring""" if item not in self.parent: return self.make_set(UpperCAmelCase_ ) if item != self.parent[item]: _snake_case : List[str] = self.find(self.parent[item] ) return self.parent[item] def UpperCAmelCase_ ( self , lowercase__ , lowercase__ ) -> Dict: """simple docstring""" _snake_case : List[str] = self.find(UpperCAmelCase_ ) _snake_case : Tuple = self.find(UpperCAmelCase_ ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: _snake_case : Union[str, Any] = roota return roota if self.rank[roota] < self.rank[roota]: _snake_case : Any = roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 _snake_case : Dict = roota return roota return None @staticmethod def UpperCAmelCase_ ( lowercase__ ) -> List[Any]: """simple docstring""" _snake_case : Optional[Any] = graph.num_vertices _snake_case : Tuple = Graph.UnionFind() _snake_case : Optional[int] = [] while num_components > 1: _snake_case : Tuple = {} for vertex in graph.get_vertices(): _snake_case : Optional[Any] = -1 _snake_case : int = graph.get_edges() for edge in edges: _snake_case : str = edge edges.remove((tail, head, weight) ) for edge in edges: _snake_case : Optional[Any] = edge _snake_case : Union[str, Any] = union_find.find(UpperCAmelCase_ ) _snake_case : List[Any] = union_find.find(UpperCAmelCase_ ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: 
_snake_case : Optional[Any] = [head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: _snake_case : Tuple = [head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: _snake_case : Union[str, Any] = cheap_edge[vertex] if union_find.find(UpperCAmelCase_ ) != union_find.find(UpperCAmelCase_ ): union_find.union(UpperCAmelCase_ , UpperCAmelCase_ ) mst_edges.append(cheap_edge[vertex] ) _snake_case : int = num_components - 1 _snake_case : Dict = Graph.build(edges=UpperCAmelCase_ ) return mst
706
'''simple docstring''' from __future__ import annotations def _a ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ): """simple docstring""" if start is None: _snake_case : Optional[Any] = 0 if end is None: _snake_case : Any = len(lowerCAmelCase_ ) - 1 if start >= end: return _snake_case : Optional[Any] = (start + end) // 2 slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) if sequence[end] < sequence[mid]: _snake_case , _snake_case : int = sequence[mid], sequence[end] slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
47
0
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _lowerCAmelCase = False class __UpperCamelCase ( unittest.TestCase ): pass @nightly @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): def __lowerCamelCase ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : int = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _lowerCAmelCase : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 ) _lowerCAmelCase : Optional[int] = pipe.dual_guided( prompt='first prompt' ,image=_A ,text_to_image_strength=0.7_5 ,generator=_A ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_A ) _lowerCAmelCase : Dict = VersatileDiffusionPipeline.from_pretrained(_A ,torch_dtype=torch.floataa ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _lowerCAmelCase : Tuple = generator.manual_seed(0 ) _lowerCAmelCase : int = pipe.dual_guided( prompt='first prompt' ,image=_A ,text_to_image_strength=0.7_5 ,generator=_A ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def __lowerCamelCase ( self ): '''simple docstring''' _lowerCAmelCase : Any = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) _lowerCAmelCase : Optional[int] = "cyberpunk 2077" 
_lowerCAmelCase : str = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) _lowerCAmelCase : Tuple = torch.manual_seed(0 ) _lowerCAmelCase : str = pipe.dual_guided( prompt=_A ,image=_A ,text_to_image_strength=0.7_5 ,generator=_A ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images _lowerCAmelCase : Dict = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _lowerCAmelCase : Optional[Any] = "A painting of a squirrel eating a burger " _lowerCAmelCase : Optional[Any] = torch.manual_seed(0 ) _lowerCAmelCase : List[str] = pipe.text_to_image( prompt=_A ,generator=_A ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ).images _lowerCAmelCase : List[str] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _lowerCAmelCase : List[Any] = pipe.image_variation(_A ,generator=_A ,output_type='numpy' ).images _lowerCAmelCase : Optional[int] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _lowerCAmelCase : List[Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
259
'''simple docstring''' from __future__ import annotations from collections import namedtuple from dataclasses import dataclass @dataclass class UpperCamelCase__ : """simple docstring""" SCREAMING_SNAKE_CASE__ : int SCREAMING_SNAKE_CASE__ : TreeNode | None = None SCREAMING_SNAKE_CASE__ : TreeNode | None = None a : Optional[Any] = namedtuple("CoinsDistribResult", "moves excess") def lowercase ( __magic_name__ ): '''simple docstring''' if root is None: return 0 # Validation def count_nodes(__magic_name__ ) -> int: if node is None: return 0 return count_nodes(node.left ) + count_nodes(node.right ) + 1 def count_coins(__magic_name__ ) -> int: if node is None: return 0 return count_coins(node.left ) + count_coins(node.right ) + node.data if count_nodes(__magic_name__ ) != count_coins(__magic_name__ ): raise ValueError("The nodes number should be same as the number of coins" ) # Main calculation def get_distrib(__magic_name__ ) -> CoinsDistribResult: if node is None: return CoinsDistribResult(0 , 1 ) UpperCAmelCase , UpperCAmelCase : Optional[Any] = get_distrib(node.left ) UpperCAmelCase , UpperCAmelCase : Any = get_distrib(node.right ) UpperCAmelCase : Optional[Any] = 1 - left_distrib_excess UpperCAmelCase : int = 1 - right_distrib_excess UpperCAmelCase : List[Any] = ( left_distrib_moves + right_distrib_moves + abs(__magic_name__ ) + abs(__magic_name__ ) ) UpperCAmelCase : List[Any] = node.data - coins_to_left - coins_to_right return CoinsDistribResult(__magic_name__ , __magic_name__ ) return get_distrib(__magic_name__ )[0] if __name__ == "__main__": import doctest doctest.testmod()
679
0
import math


class Graph:
    """Dense directed graph supporting all-pairs shortest paths (Floyd-Warshall).

    NOTE: the original (obfuscated) version defined all three methods under the
    single name ``_SCREAMING_SNAKE_CASE``, so ``add_edge`` / ``floyd_warshall`` /
    ``show_min`` called by the driver below did not exist; the intended names
    are restored here.
    """

    def __init__(self, n=0):
        # A graph with nodes 0, 1, ..., n-1.
        self.n = n
        # Adjacency matrix of edge weights (math.inf = no edge).
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores the best-known distance from i to j.
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]

    def add_edge(self, u, v, w):
        """Record a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every (i, j) pair through every intermediate node k — O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest known distance from u to v (inf if unreachable)."""
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    graph.show_min(1, 4)
    graph.show_min(0, 3)
15
from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class __snake_case(AbstractDatasetReader):
    """Dataset reader for plain-text files (one example per line).

    NOTE(review): reconstructed from an obfuscated dump — the original signature
    repeated the parameter name ``_UpperCamelCase`` (a SyntaxError) and inherited
    from the undefined name ``__lowerCAmelCase``. This mirrors the upstream
    ``datasets.io.text.TextDatasetReader``; confirm against that source.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize a bare path into a {split: path} mapping for the builder.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset: streaming view, or prepared on disk/in memory."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset

    # Backward-compatible alias for the previous (obfuscated) method name.
    _SCREAMING_SNAKE_CASE = read
15
1
"""simple docstring""" from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name def snake_case ( lowerCAmelCase_ ) -> Any: if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(lowerCAmelCase_ ): return ext raise Exception( f"""Unable to determine file format from file extension {path}. """ f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" ) def snake_case ( lowerCAmelCase_ ) -> List[str]: _snake_case = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) _snake_case = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format _snake_case = PipelineDataFormat.from_str( format=lowerCAmelCase_ , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(lowerCAmelCase_ , lowerCAmelCase_ ) class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): def __init__( self : Optional[Any] , __lowerCamelCase : Pipeline , __lowerCamelCase : PipelineDataFormat ): """simple docstring""" _snake_case = nlp _snake_case = reader @staticmethod def __UpperCAmelCase ( __lowerCamelCase : ArgumentParser ): """simple docstring""" _snake_case = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' ) run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' ) run_parser.add_argument('''--input''' , type=__lowerCamelCase , help='''Path to the file to use for inference''' ) run_parser.add_argument('''--output''' , type=__lowerCamelCase , help='''Path to the file that will be used post to write results.''' ) run_parser.add_argument('''--model''' , 
type=__lowerCamelCase , help='''Name or path to the model to instantiate.''' ) run_parser.add_argument('''--config''' , type=__lowerCamelCase , help='''Name or path to the model\'s config to instantiate.''' ) run_parser.add_argument( '''--tokenizer''' , type=__lowerCamelCase , help='''Name of the tokenizer to use. (default: same as the model name)''' ) run_parser.add_argument( '''--column''' , type=__lowerCamelCase , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , ) run_parser.add_argument( '''--format''' , type=__lowerCamelCase , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , ) run_parser.add_argument( '''--device''' , type=__lowerCamelCase , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' ) run_parser.set_defaults(func=__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" _snake_case , _snake_case = self._nlp, [] for entry in self._reader: _snake_case = nlp(**__lowerCamelCase ) if self._reader.is_multi_columns else nlp(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): outputs.append(__lowerCamelCase ) else: outputs += output # Saving data if self._nlp.binary_output: _snake_case = self._reader.save_binary(__lowerCamelCase ) logger.warning(f"""Current pipeline requires output to be in binary format, saving at {binary_path}""" ) else: self._reader.save(__lowerCamelCase )
103
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase : """simple docstring""" def __init__( self , UpperCamelCase__ , UpperCamelCase__=3 , UpperCamelCase__=32 , UpperCamelCase__=3 , UpperCamelCase__=10 , UpperCamelCase__=[10, 20, 30, 40] , UpperCamelCase__=[1, 1, 2, 1] , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__="relu" , UpperCamelCase__=3 , UpperCamelCase__=None , ) -> Dict: '''simple docstring''' lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = image_size lowerCamelCase_ = num_channels lowerCamelCase_ = embeddings_size lowerCamelCase_ = hidden_sizes lowerCamelCase_ = depths lowerCamelCase_ = is_training lowerCamelCase_ = use_labels lowerCamelCase_ = hidden_act lowerCamelCase_ = num_labels lowerCamelCase_ = scope lowerCamelCase_ = len(UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> int: '''simple docstring''' lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase_ = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self ) -> Dict: '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , 
embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]: '''simple docstring''' lowerCamelCase_ = TFRegNetModel(config=UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , training=UpperCamelCase__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = self.num_labels lowerCamelCase_ = TFRegNetForImageClassification(UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCAmelCase ( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.prepare_config_and_inputs() lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs lowerCamelCase_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class lowerCAmelCase ( a , a , unittest.TestCase ): """simple docstring""" __lowercase :Union[str, Any] = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () __lowercase :Optional[int] = ( {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification} if is_tf_available() else {} ) __lowercase :List[str] = False __lowercase :List[str] = False __lowercase :List[Any] = False __lowercase :Dict = False __lowercase :List[str] = False def _lowerCAmelCase ( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = TFRegNetModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase__ , 
has_text_modality=UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def _lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def _lowerCAmelCase ( self ) -> Any: '''simple docstring''' pass def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase__ ) lowerCamelCase_ = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ = [*signature.parameters.keys()] lowerCamelCase_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Tuple: '''simple docstring''' def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): lowerCamelCase_ = model_class(UpperCamelCase__ ) lowerCamelCase_ = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ ) lowerCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase_ = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase__ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) 
self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCamelCase_ = layer_type lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase_ = True check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def _lowerCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__={} ): lowerCamelCase_ = model(UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple() def recursive_check(UpperCamelCase__ , UpperCamelCase__ ): if isinstance(UpperCamelCase__ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ): recursive_check(UpperCamelCase__ , UpperCamelCase__ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(UpperCamelCase__ , UpperCamelCase__ ) ) , msg=( '''Tuple and dict output are not equal. 
Difference:''' F""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(UpperCamelCase__ , UpperCamelCase__ ) for model_class in self.all_model_classes: lowerCamelCase_ = model_class(UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {'''output_hidden_states''': True} ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) lowerCamelCase_ = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ ) check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {'''output_hidden_states''': True} ) def _lowerCAmelCase ( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ ) @slow def _lowerCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFRegNetModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) def lowerCamelCase_ ( ): lowerCamelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class 
lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self ) -> List[Any]: '''simple docstring''' return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _lowerCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCamelCase_ = self.default_image_processor lowerCamelCase_ = prepare_img() lowerCamelCase_ = image_processor(images=UpperCamelCase__ , return_tensors='''tf''' ) # forward pass lowerCamelCase_ = model(**UpperCamelCase__ , training=UpperCamelCase__ ) # verify the logits lowerCamelCase_ = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase__ ) lowerCamelCase_ = tf.constant([-0.4_180, -1.5_051, -3.4_836] ) tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 )
142
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
254
"""simple docstring""" from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging UpperCamelCase__ = logging.get_logger(__name__) class a ( lowercase ): UpperCamelCase : Optional[int] = ["""pixel_values"""] def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 255 , UpperCamelCase_ = True , UpperCamelCase_ = IMAGENET_DEFAULT_MEAN , UpperCamelCase_ = IMAGENET_DEFAULT_STD , **UpperCamelCase_ , ): super().__init__(**UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = size if size is not None else {'shortest_edge': 224} UpperCAmelCase__ : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) UpperCAmelCase__ : Tuple = crop_size if crop_size is not None else {'height': 224, 'width': 224} UpperCAmelCase__ : str = get_size_dict(UpperCamelCase_ , param_name='crop_size' ) UpperCAmelCase__ : Dict = do_resize UpperCAmelCase__ : Any = size UpperCAmelCase__ : Tuple = resample UpperCAmelCase__ : List[str] = do_center_crop UpperCAmelCase__ : Optional[Any] = crop_size UpperCAmelCase__ : Optional[Any] = do_rescale UpperCAmelCase__ : List[Any] = rescale_factor UpperCAmelCase__ : str = do_normalize UpperCAmelCase__ : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN UpperCAmelCase__ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , 
UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ): UpperCAmelCase__ : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: UpperCAmelCase__ : List[Any] = int((256 / 224) * size['shortest_edge'] ) UpperCAmelCase__ : str = get_resize_output_image_size(UpperCamelCase_ , size=UpperCamelCase_ , default_to_square=UpperCamelCase_ ) UpperCAmelCase__ : Dict = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( UpperCamelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ): UpperCAmelCase__ : Any = get_size_dict(UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(UpperCamelCase_ , size=(size['height'], size['width']) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ): return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ): return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ): UpperCAmelCase__ : Dict = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : List[Any] = resample if resample is not None else self.resample UpperCAmelCase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ : str = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : Any = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : List[str] = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : Optional[Any] = image_std if image_std is not None else self.image_std UpperCAmelCase__ : Union[str, Any] = size if size is not None else self.size UpperCAmelCase__ : Union[str, Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) UpperCAmelCase__ : Any = crop_size if crop_size is not None else self.crop_size UpperCAmelCase__ 
: Dict = get_size_dict(UpperCamelCase_ , param_name='crop_size' ) UpperCAmelCase__ : List[str] = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. UpperCAmelCase__ : Union[str, Any] = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: UpperCAmelCase__ : List[str] = [self.resize(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for image in images] if do_center_crop: UpperCAmelCase__ : Optional[Any] = [self.center_crop(UpperCamelCase_ , UpperCamelCase_ ) for image in images] if do_rescale: UpperCAmelCase__ : int = [self.rescale(UpperCamelCase_ , UpperCamelCase_ ) for image in images] if do_normalize: UpperCAmelCase__ : int = [self.normalize(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for image in images] UpperCAmelCase__ : Any = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] UpperCAmelCase__ : List[str] = {'pixel_values': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
254
1
"""Lazy import structure for CLIPSeg (standard transformers pattern)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
# NOTE: the obfuscated original bound this dict and the modeling list to the
# same throwaway name and then referenced the undefined ``_import_structure``.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is unavailable: skip registering the modeling submodule.
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
24
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A_ = { """configuration_nllb_moe""": [ """NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NllbMoeConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ = [ """NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""", """NllbMoeForConditionalGeneration""", """NllbMoeModel""", """NllbMoePreTrainedModel""", """NllbMoeTop2Router""", """NllbMoeSparseMLP""", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys A_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
29
0
# The three "colors" of the Dutch national flag, i.e. the only values the
# input may contain, in their required sorted order.
# NOTE: the obfuscated original rebound a single name ``_A`` four times while
# the function body read the undefined name ``colors``; restored here.
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence containing only 0, 1 and 2 via Dijkstra's three-way
    partition (Dutch national flag problem): O(n) time, O(1) extra space.

    The sequence is sorted in place and also returned (a copy is returned for
    single-element input, matching the original behavior).

    Raises:
        ValueError: if an element other than 0, 1 or 2 is encountered.
    """
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            # 0 belongs in the low region: swap it down, grow both pointers.
            sequence[mid], sequence[low] = sequence[low], sequence[mid]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            # 2 belongs in the high region: swap it up; do NOT advance mid,
            # the swapped-in element is still unexamined.
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence


# Backward-compatible alias for the previous (obfuscated) function name.
lowercase_ = dutch_national_flag_sort


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
294
"""Tokenization tests for the CPM-Ant tokenizer.

NOTE(review): identifier-obfuscated dump, preserved byte-for-byte with
comments/docstrings only added. Known damage: the base class ``A_`` is
undefined (presumably ``TokenizerTesterMixin``, imported above); local
results are all bound to the single name ``snake_case`` while later lines
read the original names (``vocab_tokens``, ``self.vocab_file``); the
``_A`` arguments in assertions are undefined.
"""
import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class lowerCamelCase(A_, unittest.TestCase):
    # Class-level test configuration expected by the tokenizer test mixin.
    UpperCAmelCase__: Any = CpmAntTokenizer
    UpperCAmelCase__: Optional[Any] = False

    def UpperCAmelCase(self: Optional[Any]) -> Dict:
        """Write a tiny vocabulary file into the temp dir for the tests."""
        super().setUp()

        snake_case = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        snake_case = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def UpperCAmelCase(self: Optional[Any]) -> Optional[int]:
        """Round-trip tokenize / encode / decode against the released checkpoint.

        NOTE(review): shadows the previous method of the same name.
        """
        snake_case = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        snake_case = "今天天气真好!"
        snake_case = ["今天", "天气", "真", "好", "!"]
        snake_case = tokenizer.tokenize(_A)
        self.assertListEqual(_A, _A)

        snake_case = "今天天气真好!"
        snake_case = [tokenizer.bos_token] + tokens

        snake_case = [6, 9_8_0_2, 1_4_9_6_2, 2_0_8_2, 8_3_1, 2_4_4]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_A), _A)

        snake_case = tokenizer.decode(_A)
        self.assertEqual(_A, _A)
294
1
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels a = object() # For specifying empty leaf dict `{}` a = object() def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Dict ): """simple docstring""" _lowerCAmelCase :Dict = tuple((re.compile(x + '$' ) for x in qs) ) for i in range(len(__magic_name__ ) - len(__magic_name__ ) + 1 ): _lowerCAmelCase :int = [x.match(__magic_name__ ) for x, y in zip(__magic_name__ , ks[i:] )] if matches and all(__magic_name__ ): return True return False def UpperCamelCase_( __magic_name__ : List[str] ): """simple docstring""" def replace(__magic_name__ : List[Any] , __magic_name__ : int ): for rule, replacement in rules: if _match(__magic_name__ , __magic_name__ ): return replacement return val return replace def UpperCamelCase_( ): """simple docstring""" return [ # embeddings (("transformer", "wpe", "embedding"), P('mp' , __magic_name__ )), (("transformer", "wte", "embedding"), P('mp' , __magic_name__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__magic_name__ , 'mp' )), (("attention", "out_proj", "kernel"), P('mp' , __magic_name__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__magic_name__ , 'mp' )), (("mlp", "c_fc", "bias"), P('mp' )), (("mlp", "c_proj", "kernel"), P('mp' , __magic_name__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def UpperCamelCase_( __magic_name__ : Tuple ): """simple docstring""" _lowerCAmelCase :str = _get_partition_rules() _lowerCAmelCase :Optional[Any] = _replacement_rules(__magic_name__ ) _lowerCAmelCase :List[Any] = {k: _unmatched for k in flatten_dict(__magic_name__ )} _lowerCAmelCase :Optional[int] = {k: replace(__magic_name__ , __magic_name__ ) for k, v in initd.items()} assert _unmatched 
not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__magic_name__ ) )
687
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py a = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ a = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. 
Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ a = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ (datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ): _lowerCAmelCase :Any = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
687
1
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ): return F'''gaussian_noise_s={seed}_shape={"_".join([str(UpperCamelCase__ ) for s in shape] )}.npy''' def UpperCAmelCase__ ( self : Dict ): super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Union[str, Any]=(4, 4, 64, 64) , __UpperCamelCase : Optional[Any]=False ): _UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa _UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ ) return image def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : Dict=False , __UpperCamelCase : List[Any]="CompVis/stable-diffusion-v1-4" ): _UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa _UpperCAmelCase = '''bf16''' if fpaa else None _UpperCAmelCase = FlaxUNetaDConditionModel.from_pretrained( UpperCamelCase__ , subfolder="unet" , dtype=UpperCamelCase__ , revision=UpperCamelCase__ ) return model, params def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : List[str]=0 , __UpperCamelCase : Dict=(4, 77, 768) , __UpperCamelCase : Optional[int]=False ): _UpperCAmelCase = jnp.bfloataa if fpaa else jnp.floataa _UpperCAmelCase = jnp.array(load_hf_numpy(self.get_file_format(UpperCamelCase__ , UpperCamelCase__ ) ) , dtype=UpperCamelCase__ ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, 
[-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : str ): _UpperCAmelCase = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4" , fpaa=UpperCamelCase__ ) _UpperCAmelCase = self.get_latents(UpperCamelCase__ , fpaa=UpperCamelCase__ ) _UpperCAmelCase = self.get_encoder_hidden_states(UpperCamelCase__ , fpaa=UpperCamelCase__ ) _UpperCAmelCase = model.apply( {"params": params} , UpperCamelCase__ , jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample assert sample.shape == latents.shape _UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _UpperCAmelCase = jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def UpperCAmelCase__ ( self : str , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ): _UpperCAmelCase = self.get_unet_model(model_id="stabilityai/stable-diffusion-2" , fpaa=UpperCamelCase__ ) _UpperCAmelCase = self.get_latents(UpperCamelCase__ , shape=(4, 4, 96, 96) , fpaa=UpperCamelCase__ ) _UpperCAmelCase = self.get_encoder_hidden_states(UpperCamelCase__ , shape=(4, 77, 1_024) , fpaa=UpperCamelCase__ ) _UpperCAmelCase = model.apply( {"params": params} , UpperCamelCase__ , 
jnp.array(UpperCamelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=UpperCamelCase__ , ).sample assert sample.shape == latents.shape _UpperCAmelCase = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) _UpperCAmelCase = jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-2 )
712
from ..utils import DummyObject, requires_backends class __SCREAMING_SNAKE_CASE ( metaclass=lowercase): __SCREAMING_SNAKE_CASE : Optional[int] = ["""keras_nlp"""] def __init__( self : Optional[int] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Optional[int] ): requires_backends(self , ["keras_nlp"] )
129
0
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path __lowerCAmelCase = Path(__file__).resolve().parents[3] / "src" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(4_2) __lowerCAmelCase = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"} __lowerCAmelCase = "zero2" __lowerCAmelCase = "zero3" __lowerCAmelCase = [ZEROa, ZEROa] def __lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> int: # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param _UpperCAmelCase = parameterized.to_safe_name("_".join(str(_lowerCAmelCase ) for x in param.args ) ) return F'''{func.__name__}_{param_based_name}''' # Cartesian-product of zero stages with models to test __lowerCAmelCase = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class __SCREAMING_SNAKE_CASE ( lowercase): @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase ) def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict ): self.run_and_check( stage=__UpperCamelCase , model=__UpperCamelCase , 
distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) @require_torch_multi_gpu @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase ) def UpperCAmelCase__ ( self : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ): self.run_and_check( stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase ) def UpperCAmelCase__ ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : str ): self.run_and_check( stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) @require_torch_multi_gpu @parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase ) def UpperCAmelCase__ ( self : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any ): self.run_and_check( stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : int ): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int = 10 , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , ): _UpperCAmelCase = models[model] _UpperCAmelCase = self.run_trainer( stage=__UpperCamelCase , model_name=__UpperCamelCase , eval_steps=__UpperCamelCase , num_train_epochs=1 , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , ) self.do_checks(__UpperCamelCase ) return output_dir def UpperCAmelCase__ ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int = 10 , __UpperCamelCase : int = 1 , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , ): _UpperCAmelCase = self.get_auto_remove_tmp_dir("./xxx" , after=__UpperCamelCase ) 
_UpperCAmelCase = F''' --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(__UpperCamelCase )} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none '''.split() if fpaa: args.extend(["--fp16"] ) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files _UpperCAmelCase = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split() _UpperCAmelCase = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py'''] _UpperCAmelCase = self.get_launcher(__UpperCamelCase ) _UpperCAmelCase = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(__UpperCamelCase , env=self.get_env() ) return output_dir def UpperCAmelCase__ ( self : Dict , __UpperCamelCase : Dict=False ): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) _UpperCAmelCase = min(2 , get_gpu_count() ) if distributed else 1 return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
684
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 __lowerCAmelCase = get_tests_dir("fixtures") class __SCREAMING_SNAKE_CASE ( unittest.TestCase): def UpperCAmelCase__ ( self : Dict ): # A mock response for an HTTP head request to emulate server down _UpperCAmelCase = mock.Mock() _UpperCAmelCase = 500 _UpperCAmelCase = {} _UpperCAmelCase = HTTPError _UpperCAmelCase = {} # Download this model to make sure it's in the cache. _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # Under the mock environment we get a 500 error when trying to reach the model. 
with mock.patch("requests.Session.request" , return_value=__UpperCamelCase ) as mock_head: _UpperCAmelCase = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # This check we did call the fake head request mock_head.assert_called() def UpperCAmelCase__ ( self : List[Any] ): # This test is for deprecated behavior and can be removed in v5 _UpperCAmelCase = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" ) def UpperCAmelCase__ ( self : Dict ): with self.assertRaises(__UpperCamelCase ): # config is in subfolder, the following should not work without specifying the subfolder _UpperCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" ) self.assertIsNotNone(__UpperCamelCase ) @is_staging_test class __SCREAMING_SNAKE_CASE ( unittest.TestCase): @classmethod def UpperCAmelCase__ ( cls : str ): _UpperCAmelCase = TOKEN HfFolder.save_token(__UpperCamelCase ) @classmethod def UpperCAmelCase__ ( cls : Optional[Any] ): try: delete_repo(token=cls._token , repo_id="test-image-processor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" ) except HTTPError: pass def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , 
repo_id="test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="test-image-processor" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : Union[str, Any] ): _UpperCAmelCase = ViTImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( __UpperCamelCase , repo_id="valid_org/test-image-processor-org" , push_to_hub=__UpperCamelCase , use_auth_token=self._token ) _UpperCAmelCase = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" ) for k, v in image_processor.__dict__.items(): self.assertEqual(__UpperCamelCase , getattr(__UpperCamelCase , __UpperCamelCase ) ) def UpperCAmelCase__ ( self : int ): CustomImageProcessor.register_for_auto_class() _UpperCAmelCase = CustomImageProcessor.from_pretrained(__UpperCamelCase ) image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , ) _UpperCAmelCase = AutoImageProcessor.from_pretrained( F'''{USER}/test-dynamic-image-processor''' , 
trust_remote_code=__UpperCamelCase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
684
1
'''simple docstring''' import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCamelCase : Any = logging.get_logger(__name__) _lowerCamelCase : Any = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", 
"quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } _lowerCamelCase : List[str] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> Tuple: """simple docstring""" for attribute in key.split('.' ): UpperCamelCase = getattr(A__ , A__ ) if weight_type is not None: UpperCamelCase = getattr(A__ , A__ ).shape else: UpperCamelCase = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" F""" {value.shape} for {full_name}""" ) if weight_type == "weight": UpperCamelCase = value elif weight_type == "weight_g": UpperCamelCase = value elif weight_type == "weight_v": UpperCamelCase = value elif weight_type == "bias": UpperCamelCase = value elif weight_type == "running_mean": UpperCamelCase = value elif weight_type == "running_var": UpperCamelCase = value elif weight_type == "num_batches_tracked": UpperCamelCase = value elif weight_type == "inv_freq": UpperCamelCase = value else: UpperCamelCase = value logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict: """simple docstring""" UpperCamelCase = [] UpperCamelCase = fairseq_model.state_dict() UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): UpperCamelCase = False if "conv_layers" in name: load_conv_layer( A__ , A__ , A__ , A__ , hf_model.config.feat_extract_norm == 'group' , ) UpperCamelCase = True else: for key, mapped_key in MAPPING.items(): UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: UpperCamelCase = True if "*" in mapped_key: UpperCamelCase = name.split(A__ )[0].split('.' )[-2] UpperCamelCase = mapped_key.replace('*' , A__ ) if "pos_bias_u" in name: UpperCamelCase = None elif "pos_bias_v" in name: UpperCamelCase = None elif "weight_g" in name: UpperCamelCase = 'weight_g' elif "weight_v" in name: UpperCamelCase = 'weight_v' elif "bias" in name: UpperCamelCase = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj UpperCamelCase = 'weight' elif "running_mean" in name: UpperCamelCase = 'running_mean' elif "inv_freq" in name: UpperCamelCase = 'inv_freq' elif "running_var" in name: UpperCamelCase = 'running_var' elif "num_batches_tracked" in name: UpperCamelCase = 'num_batches_tracked' else: UpperCamelCase = None set_recursively(A__ , A__ , A__ , A__ , A__ ) continue if not is_used: unused_weights.append(A__ ) logger.warning(F"""Unused weights: {unused_weights}""" ) def __lowerCamelCase ( A__ , A__ , A__ , A__ , A__ ) -> Optional[int]: """simple docstring""" UpperCamelCase = full_name.split('conv_layers.' )[-1] UpperCamelCase = name.split('.' 
) UpperCamelCase = int(items[0] ) UpperCamelCase = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) UpperCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) UpperCamelCase = value logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" ) UpperCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F"""{full_name} has size {value.shape}, but""" F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" ) UpperCamelCase = value logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(A__ ) @torch.no_grad() def __lowerCamelCase ( A__ , A__ , A__=None , A__=None , A__=True ) -> Optional[Any]: """simple docstring""" if config_path is not None: UpperCamelCase = WavaVecaConformerConfig.from_pretrained(A__ , hidden_act='swish' ) else: UpperCamelCase = 
WavaVecaConformerConfig() if "rope" in checkpoint_path: UpperCamelCase = 'rotary' if is_finetuned: if dict_path: UpperCamelCase = Dictionary.load(A__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq UpperCamelCase = target_dict.pad_index UpperCamelCase = target_dict.bos_index UpperCamelCase = target_dict.eos_index UpperCamelCase = len(target_dict.symbols ) UpperCamelCase = os.path.join(A__ , 'vocab.json' ) if not os.path.isdir(A__ ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(A__ ) ) return os.makedirs(A__ , exist_ok=A__ ) UpperCamelCase = target_dict.indices # fairseq has the <pad> and <s> switched UpperCamelCase = 0 UpperCamelCase = 1 with open(A__ , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(A__ , A__ ) UpperCamelCase = WavaVecaCTCTokenizer( A__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=A__ , ) UpperCamelCase = True if config.feat_extract_norm == 'layer' else False UpperCamelCase = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=A__ , return_attention_mask=A__ , ) UpperCamelCase = WavaVecaProcessor(feature_extractor=A__ , tokenizer=A__ ) processor.save_pretrained(A__ ) UpperCamelCase = WavaVecaConformerForCTC(A__ ) else: UpperCamelCase = WavaVecaConformerForPreTraining(A__ ) if is_finetuned: UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: UpperCamelCase = argparse.Namespace(task='audio_pretraining' ) UpperCamelCase = fairseq.tasks.setup_task(A__ ) UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=A__ ) UpperCamelCase = model[0].eval() recursively_load_weights(A__ , A__ , not 
is_finetuned ) hf_wavavec.save_pretrained(A__ ) if __name__ == "__main__": _lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _lowerCamelCase : Dict = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
324
'''simple docstring''' from __future__ import annotations from typing import Any class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self : str , UpperCamelCase__ : int ): """simple docstring""" UpperCamelCase = num_of_nodes UpperCamelCase = [] UpperCamelCase = {} def A ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ): """simple docstring""" self.m_edges.append([u_node, v_node, weight] ) def A ( self : List[Any] , UpperCamelCase__ : int ): """simple docstring""" if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def A ( self : Union[str, Any] , UpperCamelCase__ : int ): """simple docstring""" if self.m_component[u_node] != u_node: for k in self.m_component: UpperCamelCase = self.find_component(UpperCamelCase__ ) def A ( self : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int ): """simple docstring""" if component_size[u_node] <= component_size[v_node]: UpperCamelCase = v_node component_size[v_node] += component_size[u_node] self.set_component(UpperCamelCase__ ) elif component_size[u_node] >= component_size[v_node]: UpperCamelCase = self.find_component(UpperCamelCase__ ) component_size[u_node] += component_size[v_node] self.set_component(UpperCamelCase__ ) def A ( self : List[Any] ): """simple docstring""" UpperCamelCase = [] UpperCamelCase = 0 UpperCamelCase = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) UpperCamelCase = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: UpperCamelCase , UpperCamelCase , UpperCamelCase = edge UpperCamelCase = self.m_component[u] UpperCamelCase = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or 
minimum_weight_edge[component][2] > w ): UpperCamelCase = [u, v, w] for edge in minimum_weight_edge: if isinstance(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase , UpperCamelCase , UpperCamelCase = edge UpperCamelCase = self.m_component[u] UpperCamelCase = self.m_component[v] if u_component != v_component: mst_weight += w self.union(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" ) num_of_components -= 1 UpperCamelCase = [-1] * self.m_num_of_nodes print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" ) def __lowerCamelCase ( ) -> None: """simple docstring""" if __name__ == "__main__": import doctest doctest.testmod()
324
1
'''simple docstring''' import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=3 , lowerCamelCase_=32 , lowerCamelCase_=3 , lowerCamelCase_=10 , lowerCamelCase_=[10, 20, 30, 40] , lowerCamelCase_=[1, 1, 2, 1] , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_="relu" , lowerCamelCase_=3 , lowerCamelCase_=None , ) -> Any: lowerCAmelCase__ = parent lowerCAmelCase__ = batch_size lowerCAmelCase__ = image_size lowerCAmelCase__ = num_channels lowerCAmelCase__ = embeddings_size lowerCAmelCase__ = hidden_sizes lowerCAmelCase__ = depths lowerCAmelCase__ = is_training lowerCAmelCase__ = use_labels lowerCAmelCase__ = hidden_act lowerCAmelCase__ = num_labels lowerCAmelCase__ = scope lowerCAmelCase__ = len(lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ = self.get_config() return config, pixel_values def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> 
Optional[Any]: lowerCAmelCase__ = FlaxRegNetModel(config=lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Any: lowerCAmelCase__ = self.num_labels lowerCAmelCase__ = FlaxRegNetForImageClassification(config=lowerCamelCase_ ) lowerCAmelCase__ = model(lowerCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __SCREAMING_SNAKE_CASE ( self ) -> int: lowerCAmelCase__ = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs lowerCAmelCase__ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class a__ ( a__ , unittest.TestCase ): '''simple docstring''' lowercase__ : Any = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase__ : List[Any] = False lowercase__ : Union[str, Any] = False lowercase__ : Optional[int] = False def __SCREAMING_SNAKE_CASE ( self ) -> None: lowerCAmelCase__ = FlaxRegNetModelTester(self ) lowerCAmelCase__ = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]: return def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase_ ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def __SCREAMING_SNAKE_CASE ( self ) -> List[str]: pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: pass def __SCREAMING_SNAKE_CASE ( self ) -> Tuple: lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = model_class(lowerCamelCase_ ) lowerCAmelCase__ = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ = [*signature.parameters.keys()] lowerCAmelCase__ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> Any: def check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): lowerCAmelCase__ = model_class(lowerCamelCase_ ) lowerCAmelCase__ = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) ) lowerCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase__ = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase_ ) , expected_num_stages + 1 ) lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ = True check_hidden_states_output(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) def __SCREAMING_SNAKE_CASE ( self ) -> 
Union[str, Any]: lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ = self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) lowerCAmelCase__ = model_class(lowerCamelCase_ ) @jax.jit def model_jitted(lowerCamelCase_ , **lowerCamelCase_ ): return model(pixel_values=lowerCamelCase_ , **lowerCamelCase_ ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase__ = model_jitted(**lowerCamelCase_ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase__ = model_jitted(**lowerCamelCase_ ).to_tuple() self.assertEqual(len(lowerCamelCase_ ) , len(lowerCamelCase_ ) ) for jitted_output, output in zip(lowerCamelCase_ , lowerCamelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def _snake_case ( ) -> Union[str, Any]: lowerCAmelCase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_flax class a__ ( unittest.TestCase ): '''simple docstring''' @cached_property def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]: return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def __SCREAMING_SNAKE_CASE ( self ) -> str: lowerCAmelCase__ = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) lowerCAmelCase__ = self.default_image_processor lowerCAmelCase__ = prepare_img() lowerCAmelCase__ = image_processor(images=lowerCamelCase_ , return_tensors='''np''' ) lowerCAmelCase__ = model(**lowerCamelCase_ ) # verify the logits lowerCAmelCase__ = (1, 10_00) self.assertEqual(outputs.logits.shape , lowerCamelCase_ ) lowerCAmelCase__ = jnp.array([-0.4_180, -1.5_051, -3.4_836] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowerCamelCase_ , atol=1e-4 ) )
90
'''simple docstring''' import re def _snake_case ( A ) -> bool: lowerCAmelCase__ = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' ) if match := re.search(A , A ): return match.string == phone return False if __name__ == "__main__": print(indian_phone_validator('''+918827897895'''))
90
1
'''simple docstring''' from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
715
'''simple docstring''' from typing import Any import numpy as np def UpperCAmelCase_ ( lowercase__ ): '''simple docstring''' return np.array_equal(lowercase__ , matrix.conjugate().T ) def UpperCAmelCase_ ( lowercase__ , lowercase__ ): '''simple docstring''' a_ =v.conjugate().T a_ =v_star.dot(lowercase__ ) assert isinstance(lowercase__ , np.ndarray ) return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ )) def UpperCAmelCase_ ( ): '''simple docstring''' a_ =np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) a_ =np.array([[1], [2], [3]] ) assert is_hermitian(lowercase__ ), F"""{a} is not hermitian.""" print(rayleigh_quotient(lowercase__ , lowercase__ ) ) a_ =np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(lowercase__ ), F"""{a} is not hermitian.""" assert rayleigh_quotient(lowercase__ , lowercase__ ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
41
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor __magic_name__ : Optional[int] = logging.get_logger(__name__) class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , *_A , **_A ): '''simple docstring''' warnings.warn( """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DeiTImageProcessor instead.""" , _A , ) super().__init__(*_A , **_A )
102
'''simple docstring''' import copy import json import os import tempfile from transformers import is_torch_available from .test_configuration_utils import config_common_kwargs class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase ) -> Dict: lowercase__ : Any = parent lowercase__ : List[Any] = config_class lowercase__ : Dict = has_text_modality lowercase__ : List[str] = kwargs lowercase__ : List[Any] = common_properties def _lowerCAmelCase( self ) -> Any: lowercase__ : int = self.config_class(**self.inputs_dict ) lowercase__ : Any = ( ['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers'''] if self.common_properties is None else self.common_properties ) # Add common fields for text models if self.has_text_modality: common_properties.extend(['''vocab_size'''] ) # Test that config has the common properties as getters for prop in common_properties: self.parent.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) , msg=F"""`{prop}` does not exist""" ) # Test that config has the common properties as setter for idx, name in enumerate(__lowerCAmelCase ): try: setattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) self.parent.assertEqual( getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , msg=F"""`{name} value {idx} expected, but was {getattr(__lowerCAmelCase , __lowerCAmelCase )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass # Test if config class can be called with Config(prop_name=..) 
for idx, name in enumerate(__lowerCAmelCase ): try: lowercase__ : Tuple = self.config_class(**{name: idx} ) self.parent.assertEqual( getattr(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , msg=F"""`{name} value {idx} expected, but was {getattr(__lowerCAmelCase , __lowerCAmelCase )}""" ) except NotImplementedError: # Some models might not be able to implement setters for common_properties # In that case, a NotImplementedError is raised pass def _lowerCAmelCase( self ) -> Tuple: lowercase__ : Union[str, Any] = self.config_class(**self.inputs_dict ) lowercase__ : int = json.loads(config.to_json_string() ) for key, value in self.inputs_dict.items(): self.parent.assertEqual(obj[key] , __lowerCAmelCase ) def _lowerCAmelCase( self ) -> int: lowercase__ : List[str] = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ : Tuple = os.path.join(__lowerCAmelCase , '''config.json''' ) config_first.to_json_file(__lowerCAmelCase ) lowercase__ : Tuple = self.config_class.from_json_file(__lowerCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _lowerCAmelCase( self ) -> Optional[Any]: lowercase__ : Optional[int] = self.config_class(**self.inputs_dict ) with tempfile.TemporaryDirectory() as tmpdirname: config_first.save_pretrained(__lowerCAmelCase ) lowercase__ : Dict = self.config_class.from_pretrained(__lowerCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def _lowerCAmelCase( self ) -> Optional[int]: lowercase__ : Union[str, Any] = self.config_class(**self.inputs_dict ) lowercase__ : str = '''test''' with tempfile.TemporaryDirectory() as tmpdirname: lowercase__ : Any = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) config_first.save_pretrained(__lowerCAmelCase ) lowercase__ : Tuple = self.config_class.from_pretrained(__lowerCAmelCase , subfolder=__lowerCAmelCase ) self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() ) def 
_lowerCAmelCase( self ) -> List[Any]: lowercase__ : List[Any] = self.config_class(**self.inputs_dict , num_labels=5 ) self.parent.assertEqual(len(config.idalabel ) , 5 ) self.parent.assertEqual(len(config.labelaid ) , 5 ) lowercase__ : List[Any] = 3 self.parent.assertEqual(len(config.idalabel ) , 3 ) self.parent.assertEqual(len(config.labelaid ) , 3 ) def _lowerCAmelCase( self ) -> Any: if self.config_class.is_composition: return lowercase__ : Tuple = self.config_class() self.parent.assertIsNotNone(__lowerCAmelCase ) def _lowerCAmelCase( self ) -> Any: lowercase__ : str = copy.deepcopy(__lowerCAmelCase ) lowercase__ : Dict = self.config_class(**__lowerCAmelCase ) lowercase__ : Dict = [] for key, value in config_common_kwargs.items(): if key == "torch_dtype": if not is_torch_available(): continue else: import torch if config.torch_dtype != torch.floataa: wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.floataa) ) elif getattr(__lowerCAmelCase , __lowerCAmelCase ) != value: wrong_values.append((key, getattr(__lowerCAmelCase , __lowerCAmelCase ), value) ) if len(__lowerCAmelCase ) > 0: lowercase__ : Any = '''\n'''.join([F"""- {v[0]}: got {v[1]} instead of {v[2]}""" for v in wrong_values] ) raise ValueError(F"""The following keys were not properly set in the config:\n{errors}""" ) def _lowerCAmelCase( self ) -> Any: self.create_and_test_config_common_properties() self.create_and_test_config_to_json_string() self.create_and_test_config_to_json_file() self.create_and_test_config_from_and_save_pretrained() self.create_and_test_config_from_and_save_pretrained_subfolder() self.create_and_test_config_with_num_labels() self.check_config_can_be_init_without_params() self.check_config_arguments_init()
152
0
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class _SCREAMING_SNAKE_CASE : def __init__( self : List[str] , a__ : int , a__ : Any=2 , a__ : Union[str, Any]=8 , a__ : Union[str, Any]=True , a__ : Union[str, Any]=True , a__ : List[str]=True , a__ : Dict=True , a__ : Any=99 , a__ : int=16 , a__ : Optional[int]=5 , a__ : Union[str, Any]=2 , a__ : Optional[int]=36 , a__ : str="gelu" , a__ : Tuple=0.0 , a__ : Dict=0.0 , a__ : Optional[int]=512 , a__ : Dict=16 , a__ : List[Any]=2 , a__ : Any=0.02 , a__ : Optional[Any]=3 , a__ : str=4 , a__ : List[Any]=None , ): __magic_name__ = parent __magic_name__ = batch_size __magic_name__ = seq_length __magic_name__ = is_training __magic_name__ = use_input_mask __magic_name__ = use_token_type_ids __magic_name__ = use_labels __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = type_sequence_label_size __magic_name__ = initializer_range __magic_name__ = num_labels __magic_name__ = num_choices __magic_name__ = scope def snake_case__ ( self : Dict ): __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_input_mask: 
__magic_name__ = random_attention_mask([self.batch_size, self.seq_length] ) __magic_name__ = None if self.use_token_type_ids: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __magic_name__ = None __magic_name__ = None __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __magic_name__ = ids_tensor([self.batch_size] , self.num_choices ) __magic_name__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def snake_case__ ( self : List[str] ): return MraConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , ) def snake_case__ ( self : Optional[Any] ): __magic_name__ = self.get_config() __magic_name__ = 300 return config def snake_case__ ( self : int ): ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = self.prepare_config_and_inputs() __magic_name__ = True __magic_name__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def snake_case__ ( self : Optional[Any] , a__ : Optional[Any] , a__ : int , a__ : int , a__ : Dict , a__ : Tuple , 
a__ : Optional[int] , a__ : Any ): __magic_name__ = MraModel(config=a__ ) model.to(a__ ) model.eval() __magic_name__ = model(a__ , attention_mask=a__ , token_type_ids=a__ ) __magic_name__ = model(a__ , token_type_ids=a__ ) __magic_name__ = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : Optional[Any] , a__ : List[str] , a__ : Any , a__ : Tuple , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : List[str] , a__ : str , a__ : List[Any] , a__ : Optional[int] , ): __magic_name__ = True __magic_name__ = MraModel(a__ ) model.to(a__ ) model.eval() __magic_name__ = model( a__ , attention_mask=a__ , token_type_ids=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , ) __magic_name__ = model( a__ , attention_mask=a__ , token_type_ids=a__ , encoder_hidden_states=a__ , ) __magic_name__ = model(a__ , attention_mask=a__ , token_type_ids=a__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case__ ( self : List[str] , a__ : List[str] , a__ : Any , a__ : Any , a__ : Tuple , a__ : Optional[int] , a__ : int , a__ : List[Any] ): __magic_name__ = MraForMaskedLM(config=a__ ) model.to(a__ ) model.eval() __magic_name__ = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case__ ( self : Dict , a__ : Tuple , a__ : Any , a__ : str , a__ : List[str] , a__ : Union[str, Any] , a__ : str , a__ : Dict ): __magic_name__ = MraForQuestionAnswering(config=a__ ) model.to(a__ ) model.eval() __magic_name__ = model( a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def 
snake_case__ ( self : List[Any] , a__ : Tuple , a__ : Tuple , a__ : Union[str, Any] , a__ : str , a__ : Optional[int] , a__ : Dict , a__ : Union[str, Any] ): __magic_name__ = self.num_labels __magic_name__ = MraForSequenceClassification(a__ ) model.to(a__ ) model.eval() __magic_name__ = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def snake_case__ ( self : Union[str, Any] , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : Optional[int] , a__ : Any , a__ : Union[str, Any] , a__ : Tuple , a__ : str ): __magic_name__ = self.num_labels __magic_name__ = MraForTokenClassification(config=a__ ) model.to(a__ ) model.eval() __magic_name__ = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case__ ( self : List[Any] , a__ : Any , a__ : List[Any] , a__ : Optional[Any] , a__ : Union[str, Any] , a__ : Optional[int] , a__ : Any , a__ : Any ): __magic_name__ = self.num_choices __magic_name__ = MraForMultipleChoice(config=a__ ) model.to(a__ ) model.eval() __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __magic_name__ = model( a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def snake_case__ ( self : Tuple ): __magic_name__ = self.prepare_config_and_inputs() ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs __magic_name__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': 
input_mask} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( __a ,unittest.TestCase ): __SCREAMING_SNAKE_CASE :List[str] = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE :str = False __SCREAMING_SNAKE_CASE :List[Any] = False __SCREAMING_SNAKE_CASE :List[Any] = False __SCREAMING_SNAKE_CASE :Optional[int] = False __SCREAMING_SNAKE_CASE :List[str] = () def snake_case__ ( self : List[str] ): __magic_name__ = MraModelTester(self ) __magic_name__ = ConfigTester(self , config_class=a__ , hidden_size=37 ) def snake_case__ ( self : int ): self.config_tester.run_common_tests() def snake_case__ ( self : List[str] ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a__ ) def snake_case__ ( self : Union[str, Any] ): __magic_name__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __magic_name__ = type self.model_tester.create_and_check_model(*a__ ) def snake_case__ ( self : str ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a__ ) def snake_case__ ( self : List[str] ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*a__ ) def snake_case__ ( self : str ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a__ ) def snake_case__ ( self : Any ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a__ ) def snake_case__ ( self : Any ): __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a__ ) @slow def snake_case__ ( self : Dict ): for model_name in 
MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = MraModel.from_pretrained(a__ ) self.assertIsNotNone(a__ ) @unittest.skip(reason='''MRA does not output attentions''' ) def snake_case__ ( self : Union[str, Any] ): return @require_torch class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): @slow def snake_case__ ( self : Any ): __magic_name__ = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' ) __magic_name__ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __magic_name__ = model(a__ )[0] __magic_name__ = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , a__ ) __magic_name__ = torch.tensor( [[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1E-4 ) ) @slow def snake_case__ ( self : List[str] ): __magic_name__ = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' ) __magic_name__ = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __magic_name__ = model(a__ )[0] __magic_name__ = 5_0265 __magic_name__ = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , a__ ) __magic_name__ = torch.tensor( [[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1E-4 ) ) @slow def snake_case__ ( self : Optional[Any] ): __magic_name__ = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' ) __magic_name__ = torch.arange(4096 ).unsqueeze(0 ) with torch.no_grad(): __magic_name__ = model(a__ )[0] __magic_name__ = 5_0265 __magic_name__ = torch.Size((1, 4096, vocab_size) ) self.assertEqual(output.shape , a__ ) __magic_name__ = torch.tensor( [[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1E-4 ) )
245
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase = logging.get_logger(__name__) _lowerCAmelCase = { "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json", # See all BioGPT models at https://huggingface.co/models?filter=biogpt } class _SCREAMING_SNAKE_CASE ( __a ): __SCREAMING_SNAKE_CASE :List[str] = """biogpt""" def __init__( self : Union[str, Any] , a__ : Dict=4_2384 , a__ : Union[str, Any]=1024 , a__ : List[Any]=24 , a__ : Any=16 , a__ : List[Any]=4096 , a__ : Any="gelu" , a__ : Optional[int]=0.1 , a__ : List[Any]=0.1 , a__ : Optional[Any]=1024 , a__ : Union[str, Any]=0.02 , a__ : int=1E-12 , a__ : List[Any]=True , a__ : Tuple=True , a__ : str=0.0 , a__ : Any=0.0 , a__ : Optional[int]=1 , a__ : Tuple=0 , a__ : Dict=2 , **a__ : Tuple , ): __magic_name__ = vocab_size __magic_name__ = max_position_embeddings __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = initializer_range __magic_name__ = layer_norm_eps __magic_name__ = scale_embedding __magic_name__ = use_cache __magic_name__ = layerdrop __magic_name__ = activation_dropout super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
245
1
'''simple docstring''' from typing import Optional import numpy as np import torch from torch import nn from transformers import GPTaConfig, GPTaLMHeadModel from transformers.modeling_utils import ModuleUtilsMixin from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" lowerCamelCase : Any = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias'] @register_to_config def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 5_02_57 , SCREAMING_SNAKE_CASE_ = 10_24 , SCREAMING_SNAKE_CASE_ = 7_68 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = 12 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "gelu_new" , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 0.1 , SCREAMING_SNAKE_CASE_ = 1E-5 , SCREAMING_SNAKE_CASE_ = 0.0_2 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = False , ) -> Tuple: super().__init__() __lowerCamelCase : int = prefix_length if prefix_inner_dim != n_embd and prefix_hidden_dim is None: raise ValueError( f'`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and' f' `n_embd`: {n_embd} are not equal.' 
) __lowerCamelCase : List[Any] = prefix_inner_dim __lowerCamelCase : Any = prefix_hidden_dim __lowerCamelCase : Dict = ( nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim ) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCamelCase : Dict = ( nn.Linear(self.prefix_hidden_dim , SCREAMING_SNAKE_CASE_ ) if self.prefix_hidden_dim is not None else nn.Identity() ) __lowerCamelCase : str = GPTaConfig( vocab_size=SCREAMING_SNAKE_CASE_ , n_positions=SCREAMING_SNAKE_CASE_ , n_embd=SCREAMING_SNAKE_CASE_ , n_layer=SCREAMING_SNAKE_CASE_ , n_head=SCREAMING_SNAKE_CASE_ , n_inner=SCREAMING_SNAKE_CASE_ , activation_function=SCREAMING_SNAKE_CASE_ , resid_pdrop=SCREAMING_SNAKE_CASE_ , embd_pdrop=SCREAMING_SNAKE_CASE_ , attn_pdrop=SCREAMING_SNAKE_CASE_ , layer_norm_epsilon=SCREAMING_SNAKE_CASE_ , initializer_range=SCREAMING_SNAKE_CASE_ , scale_attn_weights=SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_ , scale_attn_by_inverse_layer_idx=SCREAMING_SNAKE_CASE_ , reorder_and_upcast_attn=SCREAMING_SNAKE_CASE_ , ) __lowerCamelCase : Dict = GPTaLMHeadModel(SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ) -> List[str]: __lowerCamelCase : Any = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Tuple = self.encode_prefix(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : List[str] = self.decode_prefix(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Any = torch.cat((prefix_embeds, embedding_text) , dim=1 ) if labels is not None: __lowerCamelCase : Union[str, Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device ) __lowerCamelCase : Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 ) __lowerCamelCase : Union[str, Any] = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ) if self.prefix_hidden_dim is not None: return out, hidden else: return out def 
lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> torch.Tensor: return torch.zeros(SCREAMING_SNAKE_CASE_ , self.prefix_length , dtype=torch.intaa , device=SCREAMING_SNAKE_CASE_ ) def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]: return self.encode_prefix(SCREAMING_SNAKE_CASE_ ) @torch.no_grad() def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: __lowerCamelCase : int = torch.split(SCREAMING_SNAKE_CASE_ , 1 , dim=0 ) __lowerCamelCase : List[str] = [] __lowerCamelCase : Tuple = [] for feature in features: __lowerCamelCase : List[str] = self.decode_prefix(feature.to(SCREAMING_SNAKE_CASE_ ) ) # back to the clip feature # Only support beam search for now __lowerCamelCase , __lowerCamelCase : int = self.generate_beam( input_embeds=SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ ) generated_tokens.append(output_tokens[0] ) generated_seq_lengths.append(seq_lengths[0] ) __lowerCamelCase : Optional[Any] = torch.stack(SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : Optional[int] = torch.stack(SCREAMING_SNAKE_CASE_ ) return generated_tokens, generated_seq_lengths @torch.no_grad() def lowercase_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = 5 , SCREAMING_SNAKE_CASE_ = 67 , SCREAMING_SNAKE_CASE_ = 1.0 , SCREAMING_SNAKE_CASE_ = None , ) -> Union[str, Any]: __lowerCamelCase : Dict = eos_token_id __lowerCamelCase : Dict = None __lowerCamelCase : List[str] = None __lowerCamelCase : Tuple = torch.ones(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=torch.int ) __lowerCamelCase : Dict = torch.zeros(SCREAMING_SNAKE_CASE_ , device=SCREAMING_SNAKE_CASE_ , dtype=torch.bool ) if input_embeds is not None: __lowerCamelCase : Any = input_embeds else: __lowerCamelCase : Dict = self.transformer.transformer.wte(SCREAMING_SNAKE_CASE_ ) for i in range(SCREAMING_SNAKE_CASE_ ): 
__lowerCamelCase : Tuple = self.transformer(inputs_embeds=SCREAMING_SNAKE_CASE_ ) __lowerCamelCase : int = outputs.logits __lowerCamelCase : Tuple = logits[:, -1, :] / (temperature if temperature > 0 else 1.0) __lowerCamelCase : Optional[int] = logits.softmax(-1 ).log() if scores is None: __lowerCamelCase , __lowerCamelCase : Dict = logits.topk(SCREAMING_SNAKE_CASE_ , -1 ) __lowerCamelCase : Union[str, Any] = generated.expand(SCREAMING_SNAKE_CASE_ , *generated.shape[1:] ) __lowerCamelCase , __lowerCamelCase : Union[str, Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 ) if tokens is None: __lowerCamelCase : Optional[int] = next_tokens else: __lowerCamelCase : int = tokens.expand(SCREAMING_SNAKE_CASE_ , *tokens.shape[1:] ) __lowerCamelCase : Any = torch.cat((tokens, next_tokens) , dim=1 ) else: __lowerCamelCase : Optional[int] = -float(np.inf ) __lowerCamelCase : Union[str, Any] = 0 __lowerCamelCase : str = scores[:, None] + logits seq_lengths[~is_stopped] += 1 __lowerCamelCase : int = scores_sum / seq_lengths[:, None] __lowerCamelCase , __lowerCamelCase : Optional[int] = scores_sum_average.view(-1 ).topk(SCREAMING_SNAKE_CASE_ , -1 ) __lowerCamelCase : int = next_tokens // scores_sum.shape[1] __lowerCamelCase : List[str] = seq_lengths[next_tokens_source] __lowerCamelCase : Optional[int] = next_tokens % scores_sum.shape[1] __lowerCamelCase : Tuple = next_tokens.unsqueeze(1 ) __lowerCamelCase : List[str] = tokens[next_tokens_source] __lowerCamelCase : Tuple = torch.cat((tokens, next_tokens) , dim=1 ) __lowerCamelCase : int = generated[next_tokens_source] __lowerCamelCase : Optional[Any] = scores_sum_average * seq_lengths __lowerCamelCase : str = is_stopped[next_tokens_source] __lowerCamelCase : List[Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 ) __lowerCamelCase : Any = torch.cat((generated, next_token_embed) , dim=1 ) __lowerCamelCase : Dict = is_stopped + next_tokens.eq(SCREAMING_SNAKE_CASE_ ).squeeze() if 
is_stopped.all(): break __lowerCamelCase : Tuple = scores / seq_lengths __lowerCamelCase : Tuple = scores.argsort(descending=SCREAMING_SNAKE_CASE_ ) # tokens tensors are already padded to max_seq_length __lowerCamelCase : Union[str, Any] = [tokens[i] for i in order] __lowerCamelCase : List[Any] = torch.stack(SCREAMING_SNAKE_CASE_ , dim=0 ) __lowerCamelCase : int = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype ) return output_texts, seq_lengths
13
import gc import threading import time import psutil import torch class lowerCAmelCase_ : """simple docstring""" def __init__( self ) -> Any: __UpperCamelCase = psutil.Process() __UpperCamelCase = False def __lowercase( self ) -> Optional[Any]: __UpperCamelCase = -1 while True: __UpperCamelCase = max(self.process.memory_info().rss , self.cpu_memory_peak ) # can't sleep or will not catch the peak right (this comment is here on purpose) if not self.peak_monitoring: break def __lowercase( self ) -> Dict: __UpperCamelCase = True __UpperCamelCase = threading.Thread(target=self.peak_monitor ) __UpperCamelCase = True self.thread.start() def __lowercase( self ) -> List[str]: __UpperCamelCase = False self.thread.join() return self.cpu_memory_peak _snake_case = PeakCPUMemory() def _a ( ) -> List[Any]: """simple docstring""" __UpperCamelCase = {'time': time.time()} gc.collect() torch.cuda.empty_cache() # CPU mem __UpperCamelCase = psutil.Process().memory_info().rss cpu_peak_tracker.start() # GPU mem for i in range(torch.cuda.device_count() ): __UpperCamelCase = torch.cuda.memory_allocated(__lowercase ) torch.cuda.reset_peak_memory_stats() return measures def _a ( __lowercase ) -> Dict: """simple docstring""" __UpperCamelCase = {'time': time.time() - start_measures['time']} gc.collect() torch.cuda.empty_cache() # CPU mem __UpperCamelCase = (psutil.Process().memory_info().rss - start_measures['cpu']) / 2**20 __UpperCamelCase = (cpu_peak_tracker.stop() - start_measures['cpu']) / 2**20 # GPU mem for i in range(torch.cuda.device_count() ): __UpperCamelCase = (torch.cuda.memory_allocated(__lowercase ) - start_measures[str(__lowercase )]) / 2**20 __UpperCamelCase = (torch.cuda.max_memory_allocated(__lowercase ) - start_measures[str(__lowercase )]) / 2**20 return measures def _a ( __lowercase , __lowercase ) -> Any: """simple docstring""" print(F"""{description}:""" ) print(F"""- Time: {measures['time']:.2f}s""" ) for i in range(torch.cuda.device_count() ): print(F"""- GPU {i} 
allocated: {measures[str(__lowercase )]:.2f}MiB""" ) __UpperCamelCase = measures[F"""{i}-peak"""] print(F"""- GPU {i} peak: {peak:.2f}MiB""" ) print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" ) print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
383
0
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a__ ( A__ , unittest.TestCase ): UpperCAmelCase__ = LongformerTokenizer UpperCAmelCase__ = True UpperCAmelCase__ = LongformerTokenizerFast UpperCAmelCase__ = True def lowerCamelCase_ ( self :List[str] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCamelCase_ : Union[str, Any] =[ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] UpperCamelCase_ : int =dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) ) UpperCamelCase_ : Optional[Any] =['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] UpperCamelCase_ : List[str] ={'unk_token': '<unk>'} UpperCamelCase_ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) UpperCamelCase_ : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(_lowerCamelCase ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(_lowerCamelCase ) ) def lowerCamelCase_ ( self :int , **_lowerCamelCase :List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def lowerCamelCase_ ( self :str , **_lowerCamelCase :Optional[Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase ) def 
lowerCamelCase_ ( self :Tuple , _lowerCamelCase :List[str] ): '''simple docstring''' UpperCamelCase_ : Union[str, Any] ='lower newer' UpperCamelCase_ : Any ='lower newer' return input_text, output_text def lowerCamelCase_ ( self :Union[str, Any] ): '''simple docstring''' UpperCamelCase_ : str =self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCamelCase_ : Union[str, Any] ='lower newer' UpperCamelCase_ : Optional[Any] =['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] UpperCamelCase_ : int =tokenizer.tokenize(_lowerCamelCase ) # , add_prefix_space=True) self.assertListEqual(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_ : Optional[Any] =tokens + [tokenizer.unk_token] UpperCamelCase_ : List[str] =[0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase ) def lowerCamelCase_ ( self :Tuple ): '''simple docstring''' UpperCamelCase_ : Any =self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=_lowerCamelCase ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=_lowerCamelCase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def lowerCamelCase_ ( self :Optional[int] ): '''simple docstring''' UpperCamelCase_ : Any =self.tokenizer_class.from_pretrained('allenai/longformer-base-4096' ) UpperCamelCase_ : int =tokenizer.encode('sequence builders' , add_special_tokens=_lowerCamelCase ) UpperCamelCase_ : Tuple =tokenizer.encode('multi-sequence build' , add_special_tokens=_lowerCamelCase ) UpperCamelCase_ : int =tokenizer.encode( 'sequence builders' , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase ) UpperCamelCase_ : Dict =tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase ) UpperCamelCase_ : Any =tokenizer.build_inputs_with_special_tokens(_lowerCamelCase ) UpperCamelCase_ : str =tokenizer.build_inputs_with_special_tokens(_lowerCamelCase , _lowerCamelCase ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def lowerCamelCase_ ( self :Optional[int] ): '''simple docstring''' UpperCamelCase_ : List[Any] =self.get_tokenizer() UpperCamelCase_ : Optional[int] ='Encode this sequence.' 
UpperCamelCase_ : Any =tokenizer.byte_encoder[' '.encode('utf-8' )[0]] # Testing encoder arguments UpperCamelCase_ : Optional[int] =tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase ) UpperCamelCase_ : Dict =tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_ : Optional[int] =tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase , add_prefix_space=_lowerCamelCase ) UpperCamelCase_ : Optional[Any] =tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) tokenizer.add_special_tokens({'bos_token': '<s>'} ) UpperCamelCase_ : Optional[int] =tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase ) UpperCamelCase_ : Tuple =tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(_lowerCamelCase , _lowerCamelCase ) # Testing spaces after special tokens UpperCamelCase_ : Optional[int] ='<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase )} ) # mask token has a left space UpperCamelCase_ : Any =tokenizer.convert_tokens_to_ids(_lowerCamelCase ) UpperCamelCase_ : List[str] ='Encode <mask> sequence' UpperCamelCase_ : Dict ='Encode <mask>sequence' UpperCamelCase_ : Dict =tokenizer.encode(_lowerCamelCase ) UpperCamelCase_ : List[Any] =encoded.index(_lowerCamelCase ) UpperCamelCase_ : int =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(_lowerCamelCase , _lowerCamelCase ) UpperCamelCase_ : int =tokenizer.encode(_lowerCamelCase ) UpperCamelCase_ : List[Any] =encoded.index(_lowerCamelCase ) UpperCamelCase_ : str =tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(_lowerCamelCase , _lowerCamelCase ) def lowerCamelCase_ ( self :List[Any] ): '''simple docstring''' pass def lowerCamelCase_ ( self :str ): '''simple docstring''' for tokenizer, 
pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCamelCase_ : Optional[int] =self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) UpperCamelCase_ : Union[str, Any] =self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase ) UpperCamelCase_ : Tuple ='A, <mask> AllenNLP sentence.' UpperCamelCase_ : Tuple =tokenizer_r.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase ) UpperCamelCase_ : List[str] =tokenizer_p.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , ) UpperCamelCase_ : Optional[Any] =tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] ) UpperCamelCase_ : Tuple =tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( _lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) self.assertSequenceEqual( _lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] ) def lowerCamelCase_ ( self :Union[str, Any] ): '''simple docstring''' for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCamelCase_ : Tuple =self.rust_tokenizer_class.from_pretrained( self.tmpdirname , 
use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase ) UpperCamelCase_ : Dict =json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCamelCase_ : List[str] =json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , _lowerCamelCase ) self.assertEqual(post_processor_state['add_prefix_space'] , _lowerCamelCase ) self.assertEqual(post_processor_state['trim_offsets'] , _lowerCamelCase ) def lowerCamelCase_ ( self :Any ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCamelCase_ : Optional[int] ='hello' # `hello` is a token in the vocabulary of `pretrained_name` UpperCamelCase_ : Optional[int] =f'''{text_of_1_token} {text_of_1_token}''' UpperCamelCase_ : str =self.rust_tokenizer_class.from_pretrained( _lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase ) UpperCamelCase_ : Optional[Any] =tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_lowerCamelCase ) + 1, len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , ) UpperCamelCase_ : Tuple =self.rust_tokenizer_class.from_pretrained( _lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase ) UpperCamelCase_ : Union[str, Any] =tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_lowerCamelCase ) + 1, len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , ) UpperCamelCase_ : int 
=self.rust_tokenizer_class.from_pretrained( _lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase ) UpperCamelCase_ : List[str] =tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_lowerCamelCase ), len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , ) UpperCamelCase_ : List[Any] =self.rust_tokenizer_class.from_pretrained( _lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase ) UpperCamelCase_ : List[Any] =tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (len(_lowerCamelCase ), len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , ) UpperCamelCase_ : Any =f''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCamelCase_ : Any =self.rust_tokenizer_class.from_pretrained( _lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase ) UpperCamelCase_ : int =tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_lowerCamelCase ) + 1, 1 + len(_lowerCamelCase ) + 1 + 
len(_lowerCamelCase )) , ) UpperCamelCase_ : Optional[Any] =self.rust_tokenizer_class.from_pretrained( _lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase ) UpperCamelCase_ : Optional[Any] =tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_lowerCamelCase ), 1 + len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , ) UpperCamelCase_ : Optional[int] =self.rust_tokenizer_class.from_pretrained( _lowerCamelCase , use_fast=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase ) UpperCamelCase_ : Optional[int] =tokenizer_r(_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , add_special_tokens=_lowerCamelCase ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowerCamelCase )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(_lowerCamelCase ), 1 + len(_lowerCamelCase ) + 1 + len(_lowerCamelCase )) , )
720
"""simple docstring""" def A_ ( __lowercase ): UpperCamelCase_ : List[str] ='' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def A_ ( __lowercase ): UpperCamelCase_ : int =[chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key UpperCamelCase_ : Any =remove_duplicates(key.upper() ) UpperCamelCase_ : int =len(__lowercase ) # First fill cipher with key characters UpperCamelCase_ : Union[str, Any] ={alphabet[i]: char for i, char in enumerate(__lowercase )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(__lowercase ) , 26 ): UpperCamelCase_ : List[Any] =alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 UpperCamelCase_ : str =alphabet[i - offset] UpperCamelCase_ : int =char return cipher_alphabet def A_ ( __lowercase , __lowercase ): return "".join(cipher_map.get(__lowercase , __lowercase ) for ch in message.upper() ) def A_ ( __lowercase , __lowercase ): UpperCamelCase_ : str ={v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(__lowercase , __lowercase ) for ch in message.upper() ) def A_ ( ): UpperCamelCase_ : Tuple =input('Enter message to encode or decode: ' ).strip() UpperCamelCase_ : int =input('Enter keyword: ' ).strip() UpperCamelCase_ : List[Any] =input('Encipher or decipher? E/D:' ).strip()[0].lower() try: UpperCamelCase_ : List[str] ={'e': encipher, 'd': decipher}[option] except KeyError: raise KeyError('invalid input option' ) UpperCamelCase_ : List[Any] =create_cipher_map(__lowercase ) print(func(__lowercase , __lowercase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
395
0
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __a = logging.get_logger(__name__) __a = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __a = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } __a = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } __a = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } __a = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __a = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __a( _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __a = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) __a = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __a = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using 
the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(_a ) class __a: """simple docstring""" def __call__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,**_SCREAMING_SNAKE_CASE ,) -> BatchEncoding: if titles is None and texts is None: return super().__call__( _SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) elif titles is None or texts is None: UpperCAmelCase_ : List[str] = titles if texts is None else texts return super().__call__( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ,return_attention_mask=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ,) UpperCAmelCase_ : List[Any] = titles if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [titles] UpperCAmelCase_ : List[str] = texts if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [texts] UpperCAmelCase_ : Any = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : List[Any] = questions if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) else [questions] * n_passages if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): raise ValueError( f'''There should be as 
many titles than texts but got {len(_SCREAMING_SNAKE_CASE )} titles and {len(_SCREAMING_SNAKE_CASE )} texts.''' ) UpperCAmelCase_ : Tuple = super().__call__(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : int = super().__call__(_SCREAMING_SNAKE_CASE ,add_special_tokens=_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,truncation=_SCREAMING_SNAKE_CASE )['''input_ids'''] UpperCAmelCase_ : Optional[int] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) ] } if return_attention_mask is not False: UpperCAmelCase_ : List[str] = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) UpperCAmelCase_ : Dict = attention_mask return self.pad(_SCREAMING_SNAKE_CASE ,padding=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 16 ,_SCREAMING_SNAKE_CASE = 64 ,_SCREAMING_SNAKE_CASE = 4 ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = reader_input['''input_ids'''] UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = reader_output[:3] UpperCAmelCase_ : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : int = sorted(range(_SCREAMING_SNAKE_CASE ) ,reverse=_SCREAMING_SNAKE_CASE ,key=relevance_logits.__getitem__ ) UpperCAmelCase_ : List[DPRReaderOutput] = [] for doc_id in sorted_docs: UpperCAmelCase_ : List[Any] = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence UpperCAmelCase_ : str = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id if sequence_ids[-1] == 
self.pad_token_id: UpperCAmelCase_ : List[Any] = sequence_ids.index(self.pad_token_id ) else: UpperCAmelCase_ : int = len(_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Tuple = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_SCREAMING_SNAKE_CASE ,top_spans=_SCREAMING_SNAKE_CASE ,) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_SCREAMING_SNAKE_CASE ,start_index=_SCREAMING_SNAKE_CASE ,end_index=_SCREAMING_SNAKE_CASE ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) ) if len(_SCREAMING_SNAKE_CASE ) >= num_spans: break return nbest_spans_predictions[:num_spans] def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> List[DPRSpanPrediction]: UpperCAmelCase_ : Tuple = [] for start_index, start_score in enumerate(_SCREAMING_SNAKE_CASE ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) UpperCAmelCase_ : int = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : x[1] ,reverse=_SCREAMING_SNAKE_CASE ) UpperCAmelCase_ : Union[str, Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''' ) UpperCAmelCase_ : str = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in 
chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(_SCREAMING_SNAKE_CASE ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_a ) class __a( _a , _a ): """simple docstring""" lowerCAmelCase = VOCAB_FILES_NAMES lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION lowerCAmelCase = ['''input_ids''', '''attention_mask''']
30
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): UpperCamelCase : Any = StableUnCLIPImgaImgPipeline UpperCamelCase : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS UpperCamelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS UpperCamelCase : Optional[int] = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess UpperCamelCase : str = frozenset([] ) def lowerCamelCase__ ( self : Dict ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE_: Union[str, Any] =32 SCREAMING_SNAKE_CASE_: Optional[Any] =embedder_hidden_size # image encoding components SCREAMING_SNAKE_CASE_: Tuple =CLIPImageProcessor(crop_size=32 , size=32 ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: int =CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=lowerCAmelCase , projection_dim=lowerCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , 
intermediate_size=37 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: List[Any] =StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Tuple =DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: str =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: List[Any] =CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: str =UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase , layers_per_block=1 , upcast_attention=lowerCAmelCase , use_linear_projection=lowerCAmelCase , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: str =DDIMScheduler( beta_schedule="""scaled_linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="""v_prediction""" , set_alpha_to_one=lowerCAmelCase , steps_offset=1 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE_: Dict =AutoencoderKL() SCREAMING_SNAKE_CASE_: List[str] ={ # image encoding components """feature_extractor""": feature_extractor, """image_encoder""": image_encoder.eval(), # image noising components """image_normalizer""": image_normalizer.eval(), """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder.eval(), """unet""": unet.eval(), """scheduler""": scheduler, """vae""": 
vae.eval(), } return components def lowerCamelCase__ ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple=0 , lowerCAmelCase : Union[str, Any]=True ) -> List[str]: '''simple docstring''' if str(lowerCAmelCase ).startswith("""mps""" ): SCREAMING_SNAKE_CASE_: Optional[int] =torch.manual_seed(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE_: List[str] =torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Optional[Any] =floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase ) if pil_image: SCREAMING_SNAKE_CASE_: Optional[Any] =input_image * 0.5 + 0.5 SCREAMING_SNAKE_CASE_: Optional[Any] =input_image.clamp(0 , 1 ) SCREAMING_SNAKE_CASE_: Optional[Any] =input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() SCREAMING_SNAKE_CASE_: Any =DiffusionPipeline.numpy_to_pil(lowerCAmelCase )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Any ="""cpu""" # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE_: Optional[int] =self.get_dummy_components() SCREAMING_SNAKE_CASE_: str =StableUnCLIPImgaImgPipeline(**lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] =sd_pipe.to(lowerCAmelCase ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase ) SCREAMING_SNAKE_CASE_: Union[str, Any] =self.get_dummy_inputs(lowerCAmelCase ) inputs.update({"""image_embeds""": None} ) SCREAMING_SNAKE_CASE_: Optional[Any] =sd_pipe(**lowerCAmelCase ).images SCREAMING_SNAKE_CASE_: int =image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE_: List[Any] =np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def 
lowerCamelCase__ ( self : Dict ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE_: Optional[int] =torch_device in ["""cpu""", """mps"""] self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase ) def lowerCamelCase__ ( self : Any ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE_: Any =torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowerCamelCase__ ( self : int ) -> Any: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCAmelCase ) @slow @require_torch_gpu class a ( unittest.TestCase ): def lowerCamelCase__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : Any ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: int =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) SCREAMING_SNAKE_CASE_: Any =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" ) SCREAMING_SNAKE_CASE_: Union[str, Any] =StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE_: Optional[int] =torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE_: Dict =pipe(lowerCAmelCase , """anime turle""" , generator=lowerCAmelCase , output_type="""np""" ) 
SCREAMING_SNAKE_CASE_: Optional[int] =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase ) def lowerCamelCase__ ( self : int ) -> int: '''simple docstring''' SCREAMING_SNAKE_CASE_: Any =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) SCREAMING_SNAKE_CASE_: Union[str, Any] =load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" ) SCREAMING_SNAKE_CASE_: List[str] =StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE_: Union[str, Any] =torch.Generator(device="""cpu""" ).manual_seed(0 ) SCREAMING_SNAKE_CASE_: str =pipe(lowerCAmelCase , """anime turle""" , generator=lowerCAmelCase , output_type="""np""" ) SCREAMING_SNAKE_CASE_: List[Any] =output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase ) def lowerCamelCase__ ( self : Tuple ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_: Any =load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE_: Optional[Any] =StableUnCLIPImgaImgPipeline.from_pretrained( """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE_: str =pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() 
SCREAMING_SNAKE_CASE_: int =pipe( lowerCAmelCase , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , ) SCREAMING_SNAKE_CASE_: str =torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
409
0
import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class __A ( unittest.TestCase ): UpperCamelCase :List[str] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def _snake_case (self , __magic_name__ , __magic_name__ , __magic_name__ ): lowerCamelCase__ : Any = hf_hub_download( repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) lowerCamelCase__ : List[Any] = VideoClassificationPipeline(model=_UpperCamelCase , image_processor=_UpperCamelCase , top_k=2 ) lowerCamelCase__ : Tuple = [ example_video_filepath, """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""", ] return video_classifier, examples def _snake_case (self , __magic_name__ , __magic_name__ ): for example in examples: lowerCamelCase__ : Any = video_classifier(_UpperCamelCase ) self.assertEqual( _UpperCamelCase , [ {"""score""": ANY(_UpperCamelCase ), """label""": ANY(_UpperCamelCase )}, {"""score""": ANY(_UpperCamelCase ), """label""": ANY(_UpperCamelCase )}, ] , ) @require_torch def _snake_case (self ): lowerCamelCase__ : Tuple = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification""" lowerCamelCase__ : Any = VideoMAEFeatureExtractor( size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} ) lowerCamelCase__ : str = pipeline( """video-classification""" , model=_UpperCamelCase , feature_extractor=_UpperCamelCase , frame_sampling_rate=4 ) lowerCamelCase__ : Dict = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) lowerCamelCase__ 
: str = video_classifier(_UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(_UpperCamelCase , decimals=4 ) , [{"""score""": 0.51_99, """label""": """LABEL_0"""}, {"""score""": 0.48_01, """label""": """LABEL_1"""}] , ) lowerCamelCase__ : List[Any] = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(_UpperCamelCase , decimals=4 ) , [ [{"""score""": 0.51_99, """label""": """LABEL_0"""}, {"""score""": 0.48_01, """label""": """LABEL_1"""}], [{"""score""": 0.51_99, """label""": """LABEL_0"""}, {"""score""": 0.48_01, """label""": """LABEL_1"""}], ] , ) @require_tf def _snake_case (self ): pass
715
from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def _A (UpperCamelCase : str ) ->None: '''simple docstring''' lowerCamelCase__ ,lowerCamelCase__ : List[str] = analyze_text(UpperCamelCase ) lowerCamelCase__ : Any = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. lowerCamelCase__ : str = sum(single_char_strings.values() ) # one length string lowerCamelCase__ : Union[str, Any] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCamelCase__ : List[Any] = single_char_strings[ch] lowerCamelCase__ : List[str] = my_str / all_sum my_fir_sum += prob * math.loga(UpperCamelCase ) # entropy formula. # print entropy print(f"{round(-1 * my_fir_sum ):.1f}" ) # two len string lowerCamelCase__ : str = sum(two_char_strings.values() ) lowerCamelCase__ : int = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCamelCase__ : str = cha + cha if sequence in two_char_strings: lowerCamelCase__ : str = two_char_strings[sequence] lowerCamelCase__ : int = int(UpperCamelCase ) / all_sum my_sec_sum += prob * math.loga(UpperCamelCase ) # print second entropy print(f"{round(-1 * my_sec_sum ):.1f}" ) # print the difference between them print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}" ) def _A (UpperCamelCase : str ) ->tuple[dict, dict]: '''simple docstring''' lowerCamelCase__ : Optional[int] = Counter() # type: ignore lowerCamelCase__ : List[Any] = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. 
two_char_strings[" " + text[0]] += 1 for i in range(0 , len(UpperCamelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def _A () ->List[str]: '''simple docstring''' import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
96
0
'''simple docstring''' import argparse import torch from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel from transformers.utils import logging logging.set_verbosity_info() def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any] )-> Union[str, Any]: '''simple docstring''' __snake_case = FunnelConfig.from_json_file(_lowerCamelCase ) print(f'''Building PyTorch model from configuration: {config}''' ) __snake_case = FunnelBaseModel(_lowerCamelCase ) if base_model else FunnelModel(_lowerCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_funnel(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _lowerCamelCase ) if __name__ == "__main__": UpperCAmelCase_ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.''' ) UpperCAmelCase_ : Any = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model )
24
from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract lowerCamelCase_ = logging.get_logger(__name__) def __magic_name__ ( __a : List[Any] , __a : Optional[int] , __a : Optional[int] ): '''simple docstring''' return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def __magic_name__ ( __a : np.ndarray , __a : Optional[str] , __a : Optional[str] = None ): '''simple docstring''' UpperCamelCase__ = tesseract_config if tesseract_config is not None else """""" # apply OCR UpperCamelCase__ = to_pil_image(__a ) UpperCamelCase__ , UpperCamelCase__ = pil_image.size UpperCamelCase__ = pytesseract.image_to_data(__a , lang=__a , output_type="""dict""" , config=__a ) UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""] # filter empty words and corresponding coordinates UpperCamelCase__ = [idx for idx, word in enumerate(__a ) if not word.strip()] UpperCamelCase__ = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices] UpperCamelCase__ = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] UpperCamelCase__ = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] UpperCamelCase__ = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] UpperCamelCase__ = [coord 
for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCamelCase__ = [] for x, y, w, h in zip(__a , __a , __a , __a ): UpperCamelCase__ = [x, y, x + w, y + h] actual_boxes.append(__a ) # finally, normalize the bounding boxes UpperCamelCase__ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__a , __a , __a ) ) assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes" return words, normalized_boxes class __A( __lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = ["""pixel_values"""] def __init__(self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "" , **SCREAMING_SNAKE_CASE_ , ): super().__init__(**SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = size if size is not None else {"""height""": 2_24, """width""": 2_24} UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = do_resize UpperCamelCase__ = size UpperCamelCase__ = resample UpperCamelCase__ = apply_ocr UpperCamelCase__ = ocr_lang UpperCamelCase__ = tesseract_config def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}" ) UpperCamelCase__ = (size["""height"""], size["""width"""]) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ): UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize UpperCamelCase__ = size if size is not None else self.size UpperCamelCase__ = get_size_dict(SCREAMING_SNAKE_CASE_ ) UpperCamelCase__ = resample if resample is not None else self.resample UpperCamelCase__ = apply_ocr if apply_ocr is not None else self.apply_ocr UpperCamelCase__ = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCamelCase__ = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCamelCase__ = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) # All transformations expect numpy arrays. 
UpperCamelCase__ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] if apply_ocr: requires_backends(self , """pytesseract""" ) UpperCamelCase__ = [] UpperCamelCase__ = [] for image in images: UpperCamelCase__ , UpperCamelCase__ = apply_tesseract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) words_batch.append(SCREAMING_SNAKE_CASE_ ) boxes_batch.append(SCREAMING_SNAKE_CASE_ ) if do_resize: UpperCamelCase__ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) UpperCamelCase__ = [flip_channel_order(SCREAMING_SNAKE_CASE_ ) for image in images] UpperCamelCase__ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] UpperCamelCase__ = BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE_ ) if apply_ocr: UpperCamelCase__ = words_batch UpperCamelCase__ = boxes_batch return data
513
0
import os # Precomputes a list of the 100 first triangular numbers lowerCAmelCase_ = [int(0.5 * n * (n + 1)) for n in range(1, 101)] def snake_case ( ): A = os.path.dirname(os.path.realpath(UpperCAmelCase ) ) A = os.path.join(UpperCAmelCase, 'words.txt' ) A = '' with open(UpperCAmelCase ) as f: A = f.readline() A = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] A = [ word for word in [sum(ord(UpperCAmelCase ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(UpperCAmelCase ) if __name__ == "__main__": print(solution())
110
from collections import deque from .hash_table import HashTable class UpperCamelCase ( snake_case__ ): """simple docstring""" def __init__( self : Any ,*_SCREAMING_SNAKE_CASE : Optional[int] ,**_SCREAMING_SNAKE_CASE : Optional[Any] ) -> int: '''simple docstring''' super().__init__(*_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ) def A( self : Optional[int] ,_SCREAMING_SNAKE_CASE : Optional[Any] ,_SCREAMING_SNAKE_CASE : Tuple ) -> List[str]: '''simple docstring''' A = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(_SCREAMING_SNAKE_CASE ) A = self.values[key] def A( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' return ( sum(self.charge_factor - len(_SCREAMING_SNAKE_CASE ) for slot in self.values ) / self.size_table * self.charge_factor ) def A( self : Dict ,_SCREAMING_SNAKE_CASE : Optional[int] ,_SCREAMING_SNAKE_CASE : str=None ) -> Union[str, Any]: '''simple docstring''' if not ( len(self.values[key] ) == self.charge_factor and self.values.count(_SCREAMING_SNAKE_CASE ) == 0 ): return key return super()._collision_resolution(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
110
1
"""simple docstring""" from manim import * class a_ ( __lowerCAmelCase ): def _snake_case ( self : List[Any] ) ->Any: '''simple docstring''' _UpperCAmelCase = Rectangle(height=0.5 , width=0.5 ) _UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) _UpperCAmelCase = Rectangle(height=0.2_5 , width=0.2_5 ) _UpperCAmelCase = [mem.copy() for i in range(6 )] _UpperCAmelCase = [mem.copy() for i in range(6 )] _UpperCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) _UpperCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) _UpperCAmelCase = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) _UpperCAmelCase = Text("""CPU""" , font_size=24 ) _UpperCAmelCase = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase_ ) _UpperCAmelCase = [mem.copy() for i in range(4 )] _UpperCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) _UpperCAmelCase = Text("""GPU""" , font_size=24 ) _UpperCAmelCase = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCamelCase_ ) _UpperCAmelCase = [mem.copy() for i in range(6 )] _UpperCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) _UpperCAmelCase = Text("""Model""" , font_size=24 ) _UpperCAmelCase = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) model.move_to([3, -1.0, 0] ) self.add(lowerCamelCase_ ) _UpperCAmelCase = [] _UpperCAmelCase = [] for i, rect in enumerate(lowerCamelCase_ ): _UpperCAmelCase = fill.copy().set_fill(lowerCamelCase_ , opacity=0.8 ) target.move_to(lowerCamelCase_ ) model_arr.append(lowerCamelCase_ ) _UpperCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.8 ) 
cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(lowerCamelCase_ ) self.add(*lowerCamelCase_ , *lowerCamelCase_ ) _UpperCAmelCase = [meta_mem.copy() for i in range(6 )] _UpperCAmelCase = [meta_mem.copy() for i in range(6 )] _UpperCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) _UpperCAmelCase = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) _UpperCAmelCase = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) _UpperCAmelCase = Text("""Disk""" , font_size=24 ) _UpperCAmelCase = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) disk.move_to([-4, -1.2_5, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) _UpperCAmelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _UpperCAmelCase = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCamelCase_ , lowerCamelCase_ ) _UpperCAmelCase = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(lowerCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowerCamelCase_ ) _UpperCAmelCase = MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ ) ) _UpperCAmelCase = Square(0.3 ) input.set_fill(lowerCamelCase_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , lowerCamelCase_ , buff=0.5 ) self.play(Write(lowerCamelCase_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=lowerCamelCase_ , buff=0.0_2 ) self.play(MoveToTarget(lowerCamelCase_ ) ) self.play(FadeOut(lowerCamelCase_ ) ) _UpperCAmelCase = Arrow(start=lowerCamelCase_ , end=lowerCamelCase_ , color=lowerCamelCase_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , lowerCamelCase_ , buff=0.2 ) 
model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) _UpperCAmelCase = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) ) _UpperCAmelCase = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.0_2} self.play( Write(lowerCamelCase_ ) , Circumscribe(model_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) _UpperCAmelCase = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.0_2 , lowerCamelCase_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.0_2 ) _UpperCAmelCase = AnimationGroup( FadeOut(lowerCamelCase_ , run_time=0.5 ) , MoveToTarget(lowerCamelCase_ , run_time=0.5 ) , FadeIn(lowerCamelCase_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(lowerCamelCase_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: _UpperCAmelCase = 0.7 self.play( Circumscribe(model_arr[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() 
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCamelCase_ , **lowerCamelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCamelCase_ , **lowerCamelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) _UpperCAmelCase = a_c _UpperCAmelCase = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 ) self.play( FadeOut(lowerCamelCase_ ) , FadeOut(lowerCamelCase_ , run_time=0.5 ) , ) _UpperCAmelCase = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=3 ) , MoveToTarget(lowerCamelCase_ ) ) self.wait()
555
from __future__ import annotations class SCREAMING_SNAKE_CASE_ : def __init__( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : str ): """simple docstring""" UpperCamelCase , UpperCamelCase = text, pattern UpperCamelCase , UpperCamelCase = len(lowerCamelCase_ ), len(lowerCamelCase_ ) def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : str ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if char == self.pattern[i]: return i return -1 def lowerCamelCase_ ( self : Union[str, Any] , lowerCamelCase_ : int ): """simple docstring""" for i in range(self.patLen - 1 , -1 , -1 ): if self.pattern[i] != self.text[current_pos + i]: return current_pos + i return -1 def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = [] for i in range(self.textLen - self.patLen + 1 ): UpperCamelCase = self.mismatch_in_text(lowerCamelCase_ ) if mismatch_index == -1: positions.append(lowerCamelCase_ ) else: UpperCamelCase = self.match_in_pattern(self.text[mismatch_index] ) UpperCamelCase = ( mismatch_index - match_index ) # shifting index lgtm [py/multiple-definition] return positions _SCREAMING_SNAKE_CASE = """ABAABA""" _SCREAMING_SNAKE_CASE = """AB""" _SCREAMING_SNAKE_CASE = BoyerMooreSearch(text, pattern) _SCREAMING_SNAKE_CASE = bms.bad_character_heuristic() if len(positions) == 0: print("""No match found""") else: print("""Pattern found in following positions: """) print(positions)
537
0
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 SCREAMING_SNAKE_CASE_ = get_tests_dir('fixtures') class a ( unittest.TestCase ): def _UpperCAmelCase ( self ): '''simple docstring''' _UpperCAmelCase : int = mock.Mock() _UpperCAmelCase : Optional[int] = 500 _UpperCAmelCase : List[Any] = {} _UpperCAmelCase : int = HTTPError _UpperCAmelCase : Optional[Any] = {} # Download this model to make sure it's in the cache. _UpperCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # Under the mock environment we get a 500 error when trying to reach the model. 
with mock.patch("requests.Session.request" , return_value=A_ ) as mock_head: _UpperCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2" ) # This check we did call the fake head request mock_head.assert_called() def _UpperCAmelCase ( self ): '''simple docstring''' _UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json" ) @is_staging_test class a ( unittest.TestCase ): @classmethod def _UpperCAmelCase ( cls ): '''simple docstring''' _UpperCAmelCase : Dict = TOKEN HfFolder.save_token(A_ ) @classmethod def _UpperCAmelCase ( cls ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id="test-feature-extractor" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-feature-extractor-org" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="test-dynamic-feature-extractor" ) except HTTPError: pass def _UpperCAmelCase ( self ): '''simple docstring''' _UpperCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained(A_ ) feature_extractor.push_to_hub("test-feature-extractor" , use_auth_token=self._token ) _UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_ , getattr(A_ , A_ ) ) # Reset repo delete_repo(token=self._token , repo_id="test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( A_ , repo_id="test-feature-extractor" , push_to_hub=A_ , use_auth_token=self._token ) _UpperCAmelCase : int = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_ , getattr(A_ , A_ ) ) def _UpperCAmelCase ( self ): '''simple docstring''' _UpperCAmelCase : 
Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(A_ ) feature_extractor.push_to_hub("valid_org/test-feature-extractor" , use_auth_token=self._token ) _UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_ , getattr(A_ , A_ ) ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-feature-extractor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( A_ , repo_id="valid_org/test-feature-extractor-org" , push_to_hub=A_ , use_auth_token=self._token ) _UpperCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org" ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A_ , getattr(A_ , A_ ) ) def _UpperCAmelCase ( self ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() _UpperCAmelCase : Union[str, Any] = CustomFeatureExtractor.from_pretrained(A_ ) feature_extractor.push_to_hub("test-dynamic-feature-extractor" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"} , ) _UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained( f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=A_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , "CustomFeatureExtractor" )
467
import warnings from ...utils import logging from .image_processing_perceiver import PerceiverImageProcessor SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) class a ( UpperCAmelCase ): def __init__( self , *A_ , **A_ ): '''simple docstring''' warnings.warn( "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use PerceiverImageProcessor instead." , A_ , ) super().__init__(*A_ , **A_ )
467
1
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu __lowerCamelCase = False class __A ( unittest.TestCase ): def lowerCamelCase__ ( self : Optional[int] ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]: return 1_2 @property def lowerCamelCase__ ( self : Dict ) -> Any: return 1_2 @property def lowerCamelCase__ ( self : List[Any] ) -> Tuple: return 3_2 @property def lowerCamelCase__ ( self : Optional[int] ) -> Tuple: torch.manual_seed(0 ) __magic_name__: Union[str, Any] = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowerCamelCase__ ( self : Any ) -> str: __magic_name__: Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCamelCase__ ( self : Any ) -> List[Any]: torch.manual_seed(0 ) __magic_name__: List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(A__ ) @property def lowerCamelCase__ ( self : Dict ) -> Optional[int]: torch.manual_seed(0 ) __magic_name__: Any = 1_2 __magic_name__: Dict = 1_2 
__magic_name__: int = { """attention_bias""": True, """cross_attention_dim""": 3_2, """attention_head_dim""": height * width, """num_attention_heads""": 1, """num_vector_embeds""": self.num_embed, """num_embeds_ada_norm""": self.num_embeds_ada_norm, """norm_num_groups""": 3_2, """sample_size""": width, """activation_fn""": """geglu-approximate""", } __magic_name__: Optional[int] = TransformeraDModel(**A__ ) return model def lowerCamelCase__ ( self : int ) -> Tuple: __magic_name__: Any = """cpu""" __magic_name__: List[Any] = self.dummy_vqvae __magic_name__: List[str] = self.dummy_text_encoder __magic_name__: List[str] = self.dummy_tokenizer __magic_name__: Optional[Any] = self.dummy_transformer __magic_name__: str = VQDiffusionScheduler(self.num_embed ) __magic_name__: Dict = LearnedClassifierFreeSamplingEmbeddings(learnable=A__ ) __magic_name__: Tuple = VQDiffusionPipeline( vqvae=A__ , text_encoder=A__ , tokenizer=A__ , transformer=A__ , scheduler=A__ , learned_classifier_free_sampling_embeddings=A__ , ) __magic_name__: Tuple = pipe.to(A__ ) pipe.set_progress_bar_config(disable=A__ ) __magic_name__: Dict = """teddy bear playing in the pool""" __magic_name__: Optional[int] = torch.Generator(device=A__ ).manual_seed(0 ) __magic_name__: Any = pipe([prompt] , generator=A__ , num_inference_steps=2 , output_type="""np""" ) __magic_name__: Optional[int] = output.images __magic_name__: Optional[Any] = torch.Generator(device=A__ ).manual_seed(0 ) __magic_name__: str = pipe( [prompt] , generator=A__ , output_type="""np""" , return_dict=A__ , num_inference_steps=2 )[0] __magic_name__: Optional[int] = image[0, -3:, -3:, -1] __magic_name__: str = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) __magic_name__: List[Any] = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def 
lowerCamelCase__ ( self : Union[str, Any] ) -> Any: __magic_name__: Union[str, Any] = """cpu""" __magic_name__: Optional[int] = self.dummy_vqvae __magic_name__: Dict = self.dummy_text_encoder __magic_name__: Optional[Any] = self.dummy_tokenizer __magic_name__: str = self.dummy_transformer __magic_name__: List[Any] = VQDiffusionScheduler(self.num_embed ) __magic_name__: str = LearnedClassifierFreeSamplingEmbeddings( learnable=A__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __magic_name__: List[str] = VQDiffusionPipeline( vqvae=A__ , text_encoder=A__ , tokenizer=A__ , transformer=A__ , scheduler=A__ , learned_classifier_free_sampling_embeddings=A__ , ) __magic_name__: Union[str, Any] = pipe.to(A__ ) pipe.set_progress_bar_config(disable=A__ ) __magic_name__: List[str] = """teddy bear playing in the pool""" __magic_name__: Union[str, Any] = torch.Generator(device=A__ ).manual_seed(0 ) __magic_name__: Dict = pipe([prompt] , generator=A__ , num_inference_steps=2 , output_type="""np""" ) __magic_name__: Optional[int] = output.images __magic_name__: Tuple = torch.Generator(device=A__ ).manual_seed(0 ) __magic_name__: List[str] = pipe( [prompt] , generator=A__ , output_type="""np""" , return_dict=A__ , num_inference_steps=2 )[0] __magic_name__: Optional[Any] = image[0, -3:, -3:, -1] __magic_name__: Union[str, Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) __magic_name__: List[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class __A ( unittest.TestCase ): def lowerCamelCase__ ( self : int ) -> Tuple: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : str ) -> Optional[Any]: __magic_name__: int = load_numpy( 
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" ) __magic_name__: Union[str, Any] = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" ) __magic_name__: Optional[int] = pipeline.to(A__ ) pipeline.set_progress_bar_config(disable=A__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __magic_name__: List[str] = torch.Generator(device=A__ ).manual_seed(0 ) __magic_name__: Optional[Any] = pipeline( """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=A__ , output_type="""np""" , ) __magic_name__: Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - image ).max() < 2.0
96
A_ : Union[str, Any] = '\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' A_ : str = [{'type': 'code', 'content': INSTALL_CONTENT}] A_ : Any = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
456
0
'''simple docstring''' import logging from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import arg_to_scheduler from transformers import TrainingArguments __lowerCamelCase : str = logging.getLogger(__name__) @dataclass class lowerCAmelCase__ ( _a ): A = field( default=0.0 ,metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) A = field(default=_a ,metadata={"help": "Whether to SortishSamler or not."} ) A = field( default=_a ,metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) A = field(default=_a ,metadata={"help": "whether to use adafactor"} ) A = field( default=_a ,metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) A = field( default=_a ,metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) A = field(default=_a ,metadata={"help": "Dropout probability. Goes into model.config."} ) A = field( default=_a ,metadata={"help": "Attention dropout probability. Goes into model.config."} ) A = field( default="linear" ,metadata={"help": F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} ,)
714
'''simple docstring''' import random def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" lowerCamelCase_ : List[str] = a[left_index] lowerCamelCase_ : List[str] = left_index + 1 for j in range(left_index + 1 , __UpperCAmelCase ): if a[j] < pivot: lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = a[i], a[j] i += 1 lowerCamelCase_ , lowerCamelCase_ : Tuple = a[i - 1], a[left_index] return i - 1 def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): """simple docstring""" if left < right: lowerCamelCase_ : int = random.randint(__UpperCAmelCase , right - 1 ) lowerCamelCase_ , lowerCamelCase_ : Optional[int] = ( a[left], a[pivot], ) # switches the pivot with the left most bound lowerCamelCase_ : List[str] = partition(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) quick_sort_random( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # recursive quicksort to the left of the pivot point quick_sort_random( __UpperCAmelCase , pivot_index + 1 , __UpperCAmelCase ) # recursive quicksort to the right of the pivot point def __snake_case (): """simple docstring""" lowerCamelCase_ : Optional[int] = input('''Enter numbers separated by a comma:\n''' ).strip() lowerCamelCase_ : Optional[Any] = [int(__UpperCAmelCase ) for item in user_input.split(''',''' )] quick_sort_random(__UpperCAmelCase , 0 , len(__UpperCAmelCase ) ) print(__UpperCAmelCase ) if __name__ == "__main__": main()
418
0
'''simple docstring''' def lowercase__( __UpperCamelCase: list[int] ,__UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = int(__UpperCamelCase ) # Initialize Result SCREAMING_SNAKE_CASE : Optional[int] = [] # Traverse through all denomination for denomination in reversed(__UpperCamelCase ): # Find denominations while int(__UpperCamelCase ) >= int(__UpperCamelCase ): total_value -= int(__UpperCamelCase ) answer.append(__UpperCamelCase ) # Append the "answers" array return answer # Driver Code if __name__ == "__main__": UpperCamelCase_ = [] UpperCamelCase_ = "0" if ( input("Do you want to enter your denominations ? (yY/n): ").strip().lower() == "y" ): UpperCamelCase_ = int(input("Enter the number of denominations you want to add: ").strip()) for i in range(0, n): denominations.append(int(input(F"""Denomination {i}: """).strip())) UpperCamelCase_ = input("Enter the change you want to make in Indian Currency: ").strip() else: # All denominations of Indian Currency if user does not enter UpperCamelCase_ = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0] UpperCamelCase_ = input("Enter the change you want to make: ").strip() if int(value) == 0 or int(value) < 0: print("The total value cannot be zero or negative.") else: print(F"""Following is minimal change for {value}: """) UpperCamelCase_ = find_minimum_change(denominations, value) # Print result for i in range(len(answer)): print(answer[i], end=" ")
28
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _snake_case = { '''configuration_efficientnet''': [ '''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EfficientNetConfig''', '''EfficientNetOnnxConfig''', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['''EfficientNetImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ '''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EfficientNetForImageClassification''', '''EfficientNetModel''', '''EfficientNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, 
EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
282
0
"""simple docstring""" from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __lowerCamelCase = [ "python", "tqdm", "regex", "requests", "packaging", "filelock", "numpy", "tokenizers", "huggingface-hub", "safetensors", "accelerate", "pyyaml", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def lowercase ( __UpperCamelCase , __UpperCamelCase=None ) -> Tuple: require_version(deps[pkg] , __UpperCamelCase )
190
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL.Image from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, PILImageResampling, get_image_size, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCamelCase = logging.get_logger(__name__) class _lowercase ( __UpperCAmelCase ): _lowerCamelCase = ['''pixel_values'''] def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = 32 , UpperCamelCase_=PILImageResampling.BILINEAR , UpperCamelCase_ = True , **UpperCamelCase_ , ): __magic_name__ = do_resize __magic_name__ = do_rescale __magic_name__ = size_divisor __magic_name__ = resample super().__init__(**UpperCamelCase_ ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ): __magic_name__ , __magic_name__ = get_image_size(UpperCamelCase_ ) # Rounds the height and width down to the closest multiple of size_divisor __magic_name__ = height // size_divisor * size_divisor __magic_name__ = width // size_divisor * size_divisor __magic_name__ = resize(UpperCamelCase_ , (new_h, new_w) , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) return image def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ): return rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ): __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = do_rescale if do_rescale is not None else 
self.do_rescale __magic_name__ = size_divisor if size_divisor is not None else self.size_divisor __magic_name__ = resample if resample is not None else self.resample if do_resize and size_divisor is None: raise ValueError('''size_divisor is required for resizing''' ) __magic_name__ = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError('''Invalid image(s)''' ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase_ ) for img in images] if do_resize: __magic_name__ = [self.resize(UpperCamelCase_ , size_divisor=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(UpperCamelCase_ , scale=1 / 255 ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __magic_name__ = {'''pixel_values''': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
190
1
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. a_ :List[Any] = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def lowercase_ (A : Optional[Any] ): config.addinivalue_line( 'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' ) config.addinivalue_line( 'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' ) config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' ) config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' ) config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' ) config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' ) def lowercase_ (A : Union[str, Any] ): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(A ) def lowercase_ (A : str ): from transformers.testing_utils import pytest_terminal_summary_main snake_case__ : int = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(A , id=A ) def lowercase_ (A : Optional[int] , A : List[str] ): # If no tests are collected, pytest exists with code 5, which makes the CI fail. 
if exitstatus == 5: snake_case__ : Optional[Any] = 0 # Doctest custom flag to ignore output. a_ :str = doctest.register_optionflag("IGNORE_RESULT") a_ :Optional[Any] = doctest.OutputChecker class snake_case__ ( lowerCAmelCase_ ): """simple docstring""" def lowercase_ ( self : Any, _snake_case : Optional[Any], _snake_case : str, _snake_case : int ) ->Optional[Any]: if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, _snake_case, _snake_case, _snake_case ) a_ :Optional[Any] = CustomOutputChecker a_ :str = HfDoctestModule a_ :List[str] = HfDocTestParser
478
from __future__ import annotations def lowercase_ (A : list , A : int | None = None , A : int | None = None ): if start is None: snake_case__ : Any = 0 if end is None: snake_case__ : List[str] = len(A ) - 1 if start >= end: return snake_case__ : Optional[int] = (start + end) // 2 slowsort(A , A , A ) slowsort(A , mid + 1 , A ) if sequence[end] < sequence[mid]: snake_case__ , snake_case__ : Union[str, Any] = sequence[mid], sequence[end] slowsort(A , A , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
478
1
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowercase__ : Any = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( """--original_config_file""", default=None, type=str, help="""The YAML config file corresponding to the original architecture.""", ) parser.add_argument( """--num_in_channels""", default=None, type=int, help="""The number of input channels. If `None` number of input channels will be automatically inferred.""", ) parser.add_argument( """--scheduler_type""", default="""pndm""", type=str, help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""", ) parser.add_argument( """--pipeline_type""", default=None, type=str, help=( """The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'""" """. If `None` pipeline will be automatically inferred.""" ), ) parser.add_argument( """--image_size""", default=None, type=int, help=( """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2""" """ Base. Use 768 for Stable Diffusion v2.""" ), ) parser.add_argument( """--prediction_type""", default=None, type=str, help=( """The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable""" """ Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2.""" ), ) parser.add_argument( """--extract_ema""", action="""store_true""", help=( """Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights""" """ or not. Defaults to `False`. 
Add `--extract_ema` to extract the EMA weights. EMA weights usually yield""" """ higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.""" ), ) parser.add_argument( """--upcast_attention""", action="""store_true""", help=( """Whether the attention computation should always be upcasted. This is necessary when running stable""" """ diffusion 2.1.""" ), ) parser.add_argument( """--from_safetensors""", action="""store_true""", help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""", ) parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") parser.add_argument( """--stable_unclip""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""", ) parser.add_argument( """--stable_unclip_prior""", type=str, default=None, required=False, help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""", ) parser.add_argument( """--clip_stats_path""", type=str, help="""Path to the clip stats file. 
Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""", required=False, ) parser.add_argument( """--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint.""" ) parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""") parser.add_argument( """--vae_path""", type=str, default=None, required=False, help="""Set to a path, hub id to an already converted vae to not convert it again.""", ) lowercase__ : Optional[int] = parser.parse_args() lowercase__ : Dict = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
317
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase__ : int = { """configuration_groupvit""": [ """GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GroupViTConfig""", """GroupViTOnnxConfig""", """GroupViTTextConfig""", """GroupViTVisionConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[int] = [ """GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GroupViTModel""", """GroupViTPreTrainedModel""", """GroupViTTextModel""", """GroupViTVisionModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : str = [ """TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFGroupViTModel""", """TFGroupViTPreTrainedModel""", """TFGroupViTTextModel""", """TFGroupViTVisionModel""", ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
317
1
import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__: str = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ): __SCREAMING_SNAKE_CASE = ReformerTokenizer __SCREAMING_SNAKE_CASE = ReformerTokenizerFast __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = True def UpperCamelCase ( self ): super().setUp() A__ = ReformerTokenizer(__lowerCamelCase,keep_accents=__lowerCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def UpperCamelCase ( self ): A__ = '''<s>''' A__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ),__lowerCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ),__lowerCamelCase ) def UpperCamelCase ( self ): A__ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0],'''<unk>''' ) self.assertEqual(vocab_keys[1],'''<s>''' ) self.assertEqual(vocab_keys[-1],'''j''' ) self.assertEqual(len(__lowerCamelCase ),1000 ) def UpperCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size,1000 ) def UpperCamelCase ( self ): if not self.test_rust_tokenizer: return A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = '''I was born in 92000, and this is falsé.''' A__ = tokenizer.tokenize(__lowerCamelCase ) A__ = rust_tokenizer.tokenize(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase,__lowerCamelCase ) A__ = tokenizer.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase ) A__ = rust_tokenizer.encode(__lowerCamelCase,add_special_tokens=__lowerCamelCase ) self.assertListEqual(__lowerCamelCase,__lowerCamelCase ) A__ = 
self.get_rust_tokenizer() A__ = tokenizer.encode(__lowerCamelCase ) A__ = rust_tokenizer.encode(__lowerCamelCase ) self.assertListEqual(__lowerCamelCase,__lowerCamelCase ) def UpperCamelCase ( self,__lowerCamelCase=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): A__ = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase,**__lowerCamelCase ) # Simple input A__ = '''This is a simple input''' A__ = ['''This is a simple input 1''', '''This is a simple input 2'''] A__ = ('''This is a simple input''', '''This is a pair''') A__ = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(__lowerCamelCase,tokenizer_r.encode,__lowerCamelCase,max_length=__lowerCamelCase,padding='''max_length''' ) # Simple input self.assertRaises(__lowerCamelCase,tokenizer_r.encode_plus,__lowerCamelCase,max_length=__lowerCamelCase,padding='''max_length''' ) # Simple input self.assertRaises( __lowerCamelCase,tokenizer_r.batch_encode_plus,__lowerCamelCase,max_length=__lowerCamelCase,padding='''max_length''',) # Pair input self.assertRaises(__lowerCamelCase,tokenizer_r.encode,__lowerCamelCase,max_length=__lowerCamelCase,padding='''max_length''' ) # Pair input self.assertRaises(__lowerCamelCase,tokenizer_r.encode_plus,__lowerCamelCase,max_length=__lowerCamelCase,padding='''max_length''' ) # Pair input self.assertRaises( __lowerCamelCase,tokenizer_r.batch_encode_plus,__lowerCamelCase,max_length=__lowerCamelCase,padding='''max_length''',) def UpperCamelCase ( self ): pass def UpperCamelCase ( self ): A__ = ReformerTokenizer(__lowerCamelCase,keep_accents=__lowerCamelCase ) A__ = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(__lowerCamelCase,['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__lowerCamelCase 
),[285, 46, 10, 170, 382],) A__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( __lowerCamelCase,[ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ],) A__ = tokenizer.convert_tokens_to_ids(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],) A__ = tokenizer.convert_ids_to_tokens(__lowerCamelCase ) self.assertListEqual( __lowerCamelCase,[ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ],) @cached_property def UpperCamelCase ( self ): return ReformerTokenizer.from_pretrained('''google/reformer-crime-and-punishment''' ) @slow def UpperCamelCase ( self ): A__ = '''Hello World!''' A__ = [126, 32, 262, 152, 38, 72, 287] self.assertListEqual(__lowerCamelCase,self.big_tokenizer.encode(__lowerCamelCase ) ) @slow def UpperCamelCase ( self ): A__ = ( '''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will''' ''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth''' ) A__ = [ 108, 265, 24, 111, 4, 258, 156, 35, 28, 275, 3, 259, 297, 260, 84, 4, 35, 110, 44, 8, 259, 91, 268, 21, 11, 209, 274, 109, 266, 277, 117, 86, 93, 315, 258, 278, 258, 277, 258, 0, 258, 288, 258, 319, 258, 0, 258, 0, 258, 0, 258, 0, 258, 287, 258, 315, 258, 289, 258, 278, 99, 269, 266, 262, 8, 259, 241, 4, 217, 230, 268, 266, 55, 168, 106, 75, 193, 266, 223, 27, 49, 26, 282, 25, 264, 299, 19, 26, 0, 258, 277, 117, 86, 93, 176, 183, 270, 11, 262, 42, 61, 265, ] self.assertListEqual(__lowerCamelCase,self.big_tokenizer.encode(__lowerCamelCase ) ) @require_torch @slow def UpperCamelCase ( self ): import torch from transformers import ReformerConfig, ReformerModel # Build sequence A__ = list(self.big_tokenizer.get_vocab().keys() )[:10] A__ = ''' '''.join(__lowerCamelCase ) A__ = self.big_tokenizer.encode_plus(__lowerCamelCase,return_tensors='''pt''' ) A__ = self.big_tokenizer.batch_encode_plus([sequence, sequence],return_tensors='''pt''' ) A__ = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) A__ = encoded_sequence['''input_ids'''].shape A__ = ReformerModel(__lowerCamelCase ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**__lowerCamelCase ) model(**__lowerCamelCase ) @slow def UpperCamelCase ( self ): # fmt: off A__ = {'''input_ids''': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 A__ = [ '''This is a very simple sentence.''', '''The quick brown fox jumps over the lazy dog.''', ] self.tokenizer_integration_test_util( expected_encoding=__lowerCamelCase,model_name='''google/reformer-crime-and-punishment''',revision='''0e6c3decb8211d49bf881013425dc8b0448b3f5a''',padding=__lowerCamelCase,sequences=__lowerCamelCase,)
190
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__: List[Any] = { 'configuration_x_clip': [ 'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XCLIPConfig', 'XCLIPTextConfig', 'XCLIPVisionConfig', ], 'processing_x_clip': ['XCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__: Any = [ 'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'XCLIPModel', 'XCLIPPreTrainedModel', 'XCLIPTextModel', 'XCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys a__: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
190
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer UpperCAmelCase__ : Dict = logging.get_logger(__name__) UpperCAmelCase__ : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase__ : int = { 'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'}, 'tokenizer_file': { 'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json' }, } UpperCAmelCase__ : Any = {'mobilebert-uncased': 5_1_2} UpperCAmelCase__ : str = {} class lowerCAmelCase_ (a__ ): """simple docstring""" __UpperCamelCase : str = VOCAB_FILES_NAMES __UpperCamelCase : int = PRETRAINED_VOCAB_FILES_MAP __UpperCamelCase : Tuple = PRETRAINED_INIT_CONFIGURATION __UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCamelCase : Tuple = MobileBertTokenizer def __init__(self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__="[UNK]" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="[PAD]" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__="[MASK]" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Dict: """simple docstring""" super().__init__( SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE__ , strip_accents=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , ) SCREAMING_SNAKE_CASE__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( 
normalizer_state.get("""lowercase""" , SCREAMING_SNAKE_CASE__ ) != do_lower_case or normalizer_state.get("""strip_accents""" , SCREAMING_SNAKE_CASE__ ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , SCREAMING_SNAKE_CASE__ ) != tokenize_chinese_chars ): SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , normalizer_state.pop("""type""" ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_lower_case SCREAMING_SNAKE_CASE__ : Union[str, Any] = strip_accents SCREAMING_SNAKE_CASE__ : str = tokenize_chinese_chars SCREAMING_SNAKE_CASE__ : Dict = normalizer_class(**SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE__ : List[str] = do_lower_case def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id] SCREAMING_SNAKE_CASE__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ ) return tuple(SCREAMING_SNAKE_CASE__ )
545
"""simple docstring""" import os import zipfile import requests from get_ci_error_statistics import download_artifact, get_artifacts_links def lowercase_ ( _snake_case ,_snake_case=7 ): SCREAMING_SNAKE_CASE__ : Dict = None if token is not None: SCREAMING_SNAKE_CASE__ : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''} # The id of a workflow (not of a workflow run) SCREAMING_SNAKE_CASE__ : List[str] = """636036""" SCREAMING_SNAKE_CASE__ : Any = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs''' # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}''' SCREAMING_SNAKE_CASE__ : str = requests.get(_snake_case ,headers=_snake_case ).json() return result["workflow_runs"] def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Tuple = get_daily_ci_runs(_snake_case ) SCREAMING_SNAKE_CASE__ : int = None for workflow_run in workflow_runs: if workflow_run["status"] == "completed": SCREAMING_SNAKE_CASE__ : Union[str, Any] = workflow_run["""id"""] break return workflow_run_id def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): SCREAMING_SNAKE_CASE__ : Dict = get_last_daily_ci_runs(_snake_case ) if workflow_run_id is not None: SCREAMING_SNAKE_CASE__ : Tuple = get_artifacts_links(worflow_run_id=_snake_case ,token=_snake_case ) for artifact_name in artifact_names: if artifact_name in artifacts_links: SCREAMING_SNAKE_CASE__ : Tuple = artifacts_links[artifact_name] download_artifact( artifact_name=_snake_case ,artifact_url=_snake_case ,output_dir=_snake_case ,token=_snake_case ) def lowercase_ ( _snake_case ,_snake_case ,_snake_case ): get_last_daily_ci_artifacts(_snake_case ,_snake_case ,_snake_case ) SCREAMING_SNAKE_CASE__ : List[str] = {} for artifact_name in artifact_names: SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(_snake_case 
,f'''{artifact_name}.zip''' ) if os.path.isfile(_snake_case ): SCREAMING_SNAKE_CASE__ : List[Any] = {} with zipfile.ZipFile(_snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(_snake_case ): # read the file with z.open(_snake_case ) as f: SCREAMING_SNAKE_CASE__ : List[Any] = f.read().decode("""UTF-8""" ) return results
545
1
"""simple docstring""" def lowercase (_lowerCAmelCase = 400_0000 ): __lowerCAmelCase = [] __lowerCAmelCase = 0, 1 while b <= n: if b % 2 == 0: even_fibs.append(lowerCAmelCase_ ) __lowerCAmelCase = b, a + b return sum(lowerCAmelCase_ ) if __name__ == "__main__": print(F"{solution() = }")
465
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class lowerCAmelCase__ : """simple docstring""" def __init__( self , a_ , a_=2 , a_=True , a_=False , a_=10 , a_=3 , a_=32 * 4 , a_=32 * 6 , a_=4 , a_=32 , ): lowerCamelCase_ : int = parent lowerCamelCase_ : int = batch_size lowerCamelCase_ : str = is_training lowerCamelCase_ : Dict = use_auxiliary_loss lowerCamelCase_ : Any = num_queries lowerCamelCase_ : List[Any] = num_channels lowerCamelCase_ : Any = min_size lowerCamelCase_ : Union[str, Any] = max_size lowerCamelCase_ : Optional[Any] = num_labels lowerCamelCase_ : Any = mask_feature_size def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( a_ ) lowerCamelCase_ : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a_ ) lowerCamelCase_ : Dict = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a_ ) > 0.5 ).float() lowerCamelCase_ : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=a_ ) > 0.5).long() lowerCamelCase_ : str = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def _UpperCamelCase ( self ): return 
MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : Union[str, Any] = self.prepare_config_and_inputs() lowerCamelCase_ : Dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def _UpperCamelCase ( self , a_ , a_ ): lowerCamelCase_ : int = output.encoder_hidden_states lowerCamelCase_ : Any = output.pixel_decoder_hidden_states lowerCamelCase_ : Union[str, Any] = output.transformer_decoder_hidden_states self.parent.assertTrue(len(a_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(a_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(a_ ) , config.decoder_config.decoder_layers ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_=False ): with torch.no_grad(): lowerCamelCase_ : List[str] = MaskFormerModel(config=a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : str = model(pixel_values=a_ , pixel_mask=a_ ) lowerCamelCase_ : Optional[Any] = model(a_ , output_hidden_states=a_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(a_ , a_ ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ 
): lowerCamelCase_ : Optional[int] = MaskFormerForInstanceSegmentation(config=a_ ) model.to(a_ ) model.eval() def comm_check_on_output(a_ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): lowerCamelCase_ : Optional[int] = model(pixel_values=a_ , pixel_mask=a_ ) lowerCamelCase_ : str = model(a_ ) comm_check_on_output(a_ ) lowerCamelCase_ : List[str] = model( pixel_values=a_ , pixel_mask=a_ , mask_labels=a_ , class_labels=a_ ) comm_check_on_output(a_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () __UpperCAmelCase : Union[str, Any] = ( {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) __UpperCAmelCase : List[str] = False __UpperCAmelCase : List[Any] = False __UpperCAmelCase : List[Any] = False __UpperCAmelCase : List[str] = False def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = MaskFormerModelTester(self ) lowerCamelCase_ : Union[str, Any] = ConfigTester(self , config_class=a_ , has_text_modality=a_ ) def _UpperCamelCase ( self ): 
self.config_tester.run_common_tests() def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(a_ , **a_ , output_hidden_states=a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*a_ ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def _UpperCamelCase ( self ): pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def _UpperCamelCase ( self ): pass @unittest.skip(reason="MaskFormer is not a generative model" ) def _UpperCamelCase ( self ): pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def _UpperCamelCase ( self ): pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def _UpperCamelCase ( self ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def _UpperCamelCase ( self ): pass def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : Tuple = model_class(a_ ) lowerCamelCase_ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase_ : List[Any] = [*signature.parameters.keys()] lowerCamelCase_ : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , a_ ) @slow def _UpperCamelCase ( self ): for model_name in ["facebook/maskformer-swin-small-coco"]: lowerCamelCase_ : Optional[int] = MaskFormerModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[Any] = (self.model_tester.min_size,) * 2 lowerCamelCase_ : Dict = { "pixel_values": torch.randn((2, 3, *size) , device=a_ ), "mask_labels": torch.randn((2, 10, *size) , device=a_ ), "class_labels": torch.zeros(2 , 10 , device=a_ ).long(), } lowerCamelCase_ : int = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(a_ ) lowerCamelCase_ : int = model(**a_ ) self.assertTrue(outputs.loss is not None ) def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(a_ , **a_ , output_hidden_states=a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase_ : Union[str, Any] = model_class(a_ ).to(a_ ) lowerCamelCase_ : Union[str, Any] = model(**a_ , output_attentions=a_ ) self.assertTrue(outputs.attentions is not None ) def _UpperCamelCase ( self ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss lowerCamelCase_ : str = self.all_model_classes[1] lowerCamelCase_ ,lowerCamelCase_ 
,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() lowerCamelCase_ : List[str] = model_class(a_ ) model.to(a_ ) model.train() lowerCamelCase_ : int = model(a_ , mask_labels=a_ , class_labels=a_ ).loss loss.backward() def _UpperCamelCase ( self ): # only MaskFormerForInstanceSegmentation has the loss lowerCamelCase_ : str = self.all_model_classes[1] lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs() lowerCamelCase_ : List[str] = True lowerCamelCase_ : int = True lowerCamelCase_ : Optional[int] = model_class(a_ ) model.to(a_ ) model.train() lowerCamelCase_ : Union[str, Any] = model(a_ , mask_labels=a_ , class_labels=a_ ) lowerCamelCase_ : Union[str, Any] = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() lowerCamelCase_ : Dict = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't lowerCamelCase_ : Tuple = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() lowerCamelCase_ : Dict = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=a_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __magic_name__ = 1E-4 def __magic_name__ ( ): '''simple docstring''' lowerCamelCase_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_vision @slow class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def _UpperCamelCase ( self ): return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def _UpperCamelCase ( self ): 
lowerCamelCase_ : Optional[Any] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(a_ ) lowerCamelCase_ : List[str] = self.default_image_processor lowerCamelCase_ : Tuple = prepare_img() lowerCamelCase_ : Dict = image_processor(a_ , return_tensors="pt" ).to(a_ ) lowerCamelCase_ : Optional[int] = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(a_ , (1, 3, 800, 1088) ) with torch.no_grad(): lowerCamelCase_ : Optional[int] = model(**a_ ) lowerCamelCase_ : str = torch.tensor( [[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]] ).to(a_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , a_ , atol=a_ ) ) lowerCamelCase_ : Tuple = torch.tensor( [[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]] ).to(a_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a_ , atol=a_ ) ) lowerCamelCase_ : Any = torch.tensor( [[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]] ).to(a_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a_ , atol=a_ ) ) def _UpperCamelCase ( self ): lowerCamelCase_ : Any = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(a_ ) .eval() ) lowerCamelCase_ : List[str] = self.default_image_processor lowerCamelCase_ : int = prepare_img() lowerCamelCase_ : Dict = image_processor(a_ , return_tensors="pt" ).to(a_ ) lowerCamelCase_ : Union[str, Any] = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(a_ , (1, 3, 800, 1088) ) with torch.no_grad(): lowerCamelCase_ : Dict = model(**a_ ) # masks_queries_logits lowerCamelCase_ : 
Any = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCamelCase_ : Any = [ [-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33], [-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95], [-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42], ] lowerCamelCase_ : int = torch.tensor(a_ ).to(a_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a_ , atol=a_ ) ) # class_queries_logits lowerCamelCase_ : Union[str, Any] = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCamelCase_ : Any = torch.tensor( [ [1.6512E00, -5.2572E00, -3.3519E00], [3.6169E-02, -5.9025E00, -2.9313E00], [1.0766E-04, -7.7630E00, -5.1263E00], ] ).to(a_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a_ , atol=a_ ) ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(a_ ) .eval() ) lowerCamelCase_ : Optional[int] = self.default_image_processor lowerCamelCase_ : int = prepare_img() lowerCamelCase_ : Any = image_processor(a_ , return_tensors="pt" ).to(a_ ) lowerCamelCase_ : Dict = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(a_ , (1, 3, 800, 1088) ) with torch.no_grad(): lowerCamelCase_ : int = model(**a_ ) # masks_queries_logits lowerCamelCase_ : Tuple = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) lowerCamelCase_ : str = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]] lowerCamelCase_ : Optional[Any] = torch.tensor(a_ ).to(a_ ) 
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a_ , atol=a_ ) ) # class_queries_logits lowerCamelCase_ : Any = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) lowerCamelCase_ : Union[str, Any] = torch.tensor( [[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a_ , atol=a_ ) ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[Any] = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(a_ ) .eval() ) lowerCamelCase_ : Optional[int] = self.default_image_processor lowerCamelCase_ : Tuple = image_processor( [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) lowerCamelCase_ : Dict = inputs["pixel_values"].to(a_ ) lowerCamelCase_ : Dict = [el.to(a_ ) for el in inputs["mask_labels"]] lowerCamelCase_ : Optional[Any] = [el.to(a_ ) for el in inputs["class_labels"]] with torch.no_grad(): lowerCamelCase_ : Optional[Any] = model(**a_ ) self.assertTrue(outputs.loss is not None )
250
0
"""simple docstring"""

import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    # NOTE(review): the base name `SCREAMING_SNAKE_CASE_` is never defined or
    # imported in this file — presumably it should be TokenizerTesterMixin
    # (imported above); confirm against the original test file.
    _UpperCamelCase : str = CpmAntTokenizer
    # NOTE(review): both class attributes share the name `_UpperCamelCase`, so
    # this second assignment (False) clobbers the first (CpmAntTokenizer).
    _UpperCamelCase : List[Any] = False

    def __A ( self ):
        # Set-up: writes a tiny vocab file into the test's tmp dir.
        # NOTE(review): both methods of this class are named `__A`; the later
        # definition shadows this one, and neither is named `setUp`/`test_*`,
        # so unittest will not invoke them as written — verify intended names.
        super().setUp()
        _lowerCAmelCase : Dict = [
            """<d>""",
            """</d>""",
            """<s>""",
            """</s>""",
            """</_>""",
            """<unk>""",
            """<pad>""",
            """</n>""",
            """我""",
            """是""",
            """C""",
            """P""",
            """M""",
            """A""",
            """n""",
            """t""",
        ]
        # NOTE(review): every local below is assigned to `_lowerCAmelCase`, so
        # `vocab_tokens` and `self.vocab_file` used next are unbound — the
        # obfuscation clobbered the original local names.
        _lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )

    @tooslow
    def __A ( self ):
        # Round-trip check: tokenize a Chinese sentence, map tokens to ids, decode.
        # NOTE(review): `tokenizer`, `tokens` and the argument placeholder `a__`
        # are unbound here for the same clobbering reason as above.
        _lowerCAmelCase : Tuple = CpmAntTokenizer.from_pretrained("""openbmb/cpm-ant-10b""" )
        _lowerCAmelCase : Optional[Any] = """今天天气真好!"""
        _lowerCAmelCase : Any = ["""今天""", """天气""", """真""", """好""", """!"""]
        _lowerCAmelCase : str = tokenizer.tokenize(a__ )
        self.assertListEqual(a__ , a__ )
        _lowerCAmelCase : Tuple = """今天天气真好!"""
        _lowerCAmelCase : Optional[Any] = [tokenizer.bos_token] + tokens
        _lowerCAmelCase : Optional[int] = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
        _lowerCAmelCase : Tuple = tokenizer.decode(a__ )
        self.assertEqual(a__ , a__ )
720
"""Project Euler problem 72: count reduced proper fractions with denominator <= limit."""


def SCREAMING_SNAKE_CASE(_lowerCamelCase: int = 1000000) -> int:
    """Return ``sum(phi(d) for d in 2..limit)`` via a totient sieve.

    Fixes vs. the obfuscated original: the sieve list and the limit were
    referenced under names (``phi``, ``limit``) that were never bound, and the
    inner ``range`` stepped by the limit instead of by the prime ``i``.
    """
    # phi[i] starts at i - 1; for every prime p dividing i we remove phi[i] // p.
    phi = [i - 1 for i in range(_lowerCamelCase + 1)]

    for i in range(2, _lowerCamelCase + 1):
        if phi[i] == i - 1:  # i is prime: its totient was left untouched
            for j in range(2 * i, _lowerCamelCase + 1, i):
                phi[j] -= phi[j] // i

    # phi[0] / phi[1] are sentinels; fractions need denominator >= 2.
    return sum(phi[2 : _lowerCamelCase + 1])


if __name__ == "__main__":
    # Original guard called an undefined `solution()`; call the real entry point.
    print(SCREAMING_SNAKE_CASE())
663
0
import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __lowerCAmelCase ( _a ): def lowerCamelCase (self ) -> Dict: '''simple docstring''' snake_case_ : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__magic_name__ , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__magic_name__ , '''neck_hidden_sizes''' ) ) self.parent.assertTrue(hasattr(__magic_name__ , '''num_attention_heads''' ) ) class __lowerCAmelCase : def __init__(self , __magic_name__ , __magic_name__=13 , __magic_name__=32 , __magic_name__=2 , __magic_name__=3 , __magic_name__=640 , __magic_name__=4 , __magic_name__="silu" , __magic_name__=3 , __magic_name__=32 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.1 , __magic_name__=0.02 , __magic_name__=True , __magic_name__=True , __magic_name__=10 , __magic_name__=None , ) -> Tuple: '''simple docstring''' snake_case_ : Union[str, Any] = parent snake_case_ : str = batch_size snake_case_ : Union[str, Any] = image_size snake_case_ : Any = patch_size snake_case_ : Union[str, Any] = num_channels snake_case_ : Dict = last_hidden_size snake_case_ : Dict = num_attention_heads snake_case_ : str = hidden_act snake_case_ : Optional[int] = conv_kernel_size snake_case_ : str = output_stride 
snake_case_ : int = hidden_dropout_prob snake_case_ : Optional[Any] = attention_probs_dropout_prob snake_case_ : Union[str, Any] = classifier_dropout_prob snake_case_ : Any = use_labels snake_case_ : Tuple = is_training snake_case_ : Dict = num_labels snake_case_ : Any = initializer_range snake_case_ : List[Any] = scope def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ : Tuple = None snake_case_ : Optional[int] = None if self.use_labels: snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) snake_case_ : int = self.get_config() return config, pixel_values, labels, pixel_labels def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]: '''simple docstring''' snake_case_ : List[str] = MobileViTModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : Dict = model(__magic_name__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> str: '''simple docstring''' 
snake_case_ : Any = self.num_labels snake_case_ : Optional[int] = MobileViTForImageClassification(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : Dict = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]: '''simple docstring''' snake_case_ : str = self.num_labels snake_case_ : int = MobileViTForSemanticSegmentation(__magic_name__ ) model.to(__magic_name__ ) model.eval() snake_case_ : Dict = model(__magic_name__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) snake_case_ : List[Any] = model(__magic_name__ , labels=__magic_name__ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : Union[str, Any] = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ , snake_case_ : List[str] = config_and_inputs snake_case_ : int = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( _a, _a, unittest.TestCase ): lowerCamelCase_ : Tuple = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) lowerCamelCase_ : Tuple = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase_ : Dict = False lowerCamelCase_ : Optional[int] = False lowerCamelCase_ : int = False lowerCamelCase_ : int = False def lowerCamelCase (self ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any 
= MobileViTModelTester(self ) snake_case_ : int = MobileViTConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='''MobileViT does not use inputs_embeds''' ) def lowerCamelCase (self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='''MobileViT does not support input and output embeddings''' ) def lowerCamelCase (self ) -> int: '''simple docstring''' pass @unittest.skip(reason='''MobileViT does not output attentions''' ) def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' pass def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ , snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : Any = model_class(__magic_name__ ) snake_case_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ : int = [*signature.parameters.keys()] snake_case_ : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __magic_name__ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def lowerCamelCase (self ) -> Tuple: '''simple docstring''' pass def lowerCamelCase (self ) -> List[Any]: '''simple docstring''' snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ): snake_case_ : Optional[Any] = model_class(__magic_name__ ) model.to(__magic_name__ ) model.eval() with torch.no_grad(): snake_case_ : Optional[int] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) ) snake_case_ : Tuple = outputs.hidden_states snake_case_ : Tuple = 5 
self.assertEqual(len(__magic_name__ ) , __magic_name__ ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. snake_case_ : str = 2 for i in range(len(__magic_name__ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ : int = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ : Optional[Any] = True check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__magic_name__ ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ ) @slow def lowerCamelCase (self ) -> Tuple: '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : str = MobileViTModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def lowerCamelCase_ ( ) -> Tuple: """simple docstring""" snake_case_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' return 
MobileViTImageProcessor.from_pretrained('''apple/mobilevit-xx-small''' ) if is_vision_available() else None @slow def lowerCamelCase (self ) -> Any: '''simple docstring''' snake_case_ : List[str] = MobileViTForImageClassification.from_pretrained('''apple/mobilevit-xx-small''' ).to(__magic_name__ ) snake_case_ : List[Any] = self.default_image_processor snake_case_ : Dict = prepare_img() snake_case_ : Optional[int] = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ ) # forward pass with torch.no_grad(): snake_case_ : Union[str, Any] = model(**__magic_name__ ) # verify the logits snake_case_ : Dict = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) snake_case_ : str = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1e-4 ) ) @slow def lowerCamelCase (self ) -> Union[str, Any]: '''simple docstring''' snake_case_ : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) snake_case_ : str = model.to(__magic_name__ ) snake_case_ : Optional[Any] = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) snake_case_ : Any = prepare_img() snake_case_ : Optional[Any] = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ ) # forward pass with torch.no_grad(): snake_case_ : Optional[int] = model(**__magic_name__ ) snake_case_ : Optional[Any] = outputs.logits # verify the logits snake_case_ : List[Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __magic_name__ ) snake_case_ : List[str] = torch.tensor( [ [[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]], [[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]], [[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]], ] , 
device=__magic_name__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __magic_name__ , atol=1e-4 ) ) @slow def lowerCamelCase (self ) -> List[str]: '''simple docstring''' snake_case_ : int = MobileViTForSemanticSegmentation.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) snake_case_ : Any = model.to(__magic_name__ ) snake_case_ : int = MobileViTImageProcessor.from_pretrained('''apple/deeplabv3-mobilevit-xx-small''' ) snake_case_ : List[Any] = prepare_img() snake_case_ : str = image_processor(images=__magic_name__ , return_tensors='''pt''' ).to(__magic_name__ ) # forward pass with torch.no_grad(): snake_case_ : str = model(**__magic_name__ ) snake_case_ : str = outputs.logits.detach().cpu() snake_case_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ , target_sizes=[(50, 60)] ) snake_case_ : List[Any] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __magic_name__ ) snake_case_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=__magic_name__ ) snake_case_ : Tuple = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __magic_name__ )
60
"""Erdős–Rényi G(n, p) random-graph generator (adjacency-dict representation)."""

import random


def lowerCamelCase_(nodes_number, probability, directed=False) -> dict:
    """Generate a random graph on ``nodes_number`` vertices with edge probability ``probability``.

    Fixes vs. the obfuscated original: all three parameters shared one name
    (a SyntaxError), and the helper it calls was shadowed under the same
    function name; the helper is restored as ``complete_graph``.
    """
    graph = {i: [] for i in range(nodes_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(nodes_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is below the edge probability
    for i in range(nodes_number):
        for j in range(i + 1, nodes_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the reverse edge too
                    graph[j].append(i)
    return graph


def complete_graph(nodes_number) -> dict:
    """Return the complete graph on ``nodes_number`` vertices as an adjacency dict."""
    return {i: [j for j in range(nodes_number) if i != j] for i in range(nodes_number)}


if __name__ == "__main__":
    import doctest

    doctest.testmod()
530
0
from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class __lowercase: '''simple docstring''' __a : int = XGLMConfig __a : Dict = {} __a : Tuple = 'gelu' def __init__( self , __a , __a=14 , __a=7 , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=0.02 , ): __lowerCamelCase : Union[str, Any] = parent __lowerCamelCase : Optional[int] = batch_size __lowerCamelCase : Optional[Any] = seq_length __lowerCamelCase : Tuple = is_training __lowerCamelCase : Any = use_input_mask __lowerCamelCase : str = use_labels __lowerCamelCase : Any = vocab_size __lowerCamelCase : str = d_model __lowerCamelCase : List[Any] = num_hidden_layers __lowerCamelCase : Union[str, Any] = num_attention_heads __lowerCamelCase : Dict = ffn_dim __lowerCamelCase : Dict = activation_function __lowerCamelCase : Optional[int] = activation_dropout __lowerCamelCase : Optional[Any] = attention_dropout __lowerCamelCase : List[str] = max_position_embeddings __lowerCamelCase : Any = initializer_range __lowerCamelCase : int = None __lowerCamelCase : Tuple = 0 __lowerCamelCase : Optional[int] = 2 __lowerCamelCase : str = 1 def snake_case_ ( self ): return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def snake_case_ ( self ): __lowerCamelCase : Any = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) __lowerCamelCase : Tuple = None if 
self.use_input_mask: __lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase : int = self.get_config() __lowerCamelCase : List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def snake_case_ ( self ): return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__a , ) def snake_case_ ( self ): __lowerCamelCase : Any = self.prepare_config_and_inputs() ( __lowerCamelCase ) : Union[str, Any] = config_and_inputs __lowerCamelCase : Dict = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class __lowercase( lowercase__ , lowercase__ , unittest.TestCase ): '''simple docstring''' __a : Optional[int] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () __a : List[str] = (TFXGLMForCausalLM,) if is_tf_available() else () __a : List[Any] = ( {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {} ) __a : Optional[int] = False __a : Union[str, Any] = False __a : int = False def snake_case_ ( self ): __lowerCamelCase : str = TFXGLMModelTester(self ) __lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , n_embd=37 ) def snake_case_ ( self ): self.config_tester.run_common_tests() @slow def snake_case_ ( self ): for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase : Union[str, Any] = TFXGLMModel.from_pretrained(__a ) 
self.assertIsNotNone(__a ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def snake_case_ ( self ): super().test_resize_token_embeddings() @require_tf class __lowercase( unittest.TestCase ): '''simple docstring''' @slow def snake_case_ ( self , __a=True ): __lowerCamelCase : str = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __lowerCamelCase : Union[str, Any] = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: on __lowerCamelCase : List[str] = model.generate(__a , do_sample=__a , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __a ) @slow def snake_case_ ( self ): __lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : List[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) __lowerCamelCase : Any = tokenizer('Today is a nice day and' , return_tensors='tf' ) __lowerCamelCase : Union[str, Any] = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): __lowerCamelCase : int = model.generate(__a , do_sample=__a , seed=[7, 0] ) __lowerCamelCase : Tuple = tokenizer.decode(output_ids[0] , skip_special_tokens=__a ) __lowerCamelCase : Tuple = ( 'Today is a nice day and warm evening here over Southern Alberta!! 
Today when they closed schools due' ) self.assertEqual(__a , __a ) @slow def snake_case_ ( self ): __lowerCamelCase : str = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : List[str] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __lowerCamelCase : Optional[Any] = 'left' # use different length sentences to test batching __lowerCamelCase : int = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When', 'Hello, my dog is a little', ] __lowerCamelCase : str = tokenizer(__a , return_tensors='tf' , padding=__a ) __lowerCamelCase : Tuple = inputs['input_ids'] __lowerCamelCase : int = model.generate(input_ids=__a , attention_mask=inputs['attention_mask'] , max_new_tokens=12 ) __lowerCamelCase : Tuple = tokenizer(sentences[0] , return_tensors='tf' ).input_ids __lowerCamelCase : str = model.generate(input_ids=__a , max_new_tokens=12 ) __lowerCamelCase : Union[str, Any] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids __lowerCamelCase : Tuple = model.generate(input_ids=__a , max_new_tokens=12 ) __lowerCamelCase : Any = tokenizer.batch_decode(__a , skip_special_tokens=__a ) __lowerCamelCase : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a ) __lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a ) __lowerCamelCase : List[Any] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. 
When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(__a , __a ) self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
714
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) a_ : Tuple = '''hf-internal-testing/tiny-random-bert''' a_ : List[Any] = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''') a_ : Dict = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6''' class __lowercase( unittest.TestCase ): '''simple docstring''' def snake_case_ ( self ): __lowerCamelCase : List[Any] = cached_file(__a , __a ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(__a ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(__a , __a ) ) ) with open(os.path.join(__a , 'refs' , 'main' ) ) as f: __lowerCamelCase : Optional[int] = f.read() self.assertEqual(__a , os.path.join(__a , 'snapshots' , __a , __a ) ) self.assertTrue(os.path.isfile(__a ) ) # File is cached at the same place the second time. __lowerCamelCase : Optional[Any] = cached_file(__a , __a ) self.assertEqual(__a , __a ) # Using a specific revision to test the full commit hash. 
__lowerCamelCase : Optional[int] = cached_file(__a , __a , revision='9b8c223' ) self.assertEqual(__a , os.path.join(__a , 'snapshots' , __a , __a ) ) def snake_case_ ( self ): with self.assertRaisesRegex(__a , 'is not a valid model identifier' ): __lowerCamelCase : List[str] = cached_file('tiny-random-bert' , __a ) with self.assertRaisesRegex(__a , 'is not a valid git identifier' ): __lowerCamelCase : int = cached_file(__a , __a , revision='aaaa' ) with self.assertRaisesRegex(__a , 'does not appear to have a file named' ): __lowerCamelCase : List[Any] = cached_file(__a , 'conf' ) def snake_case_ ( self ): with self.assertRaisesRegex(__a , 'does not appear to have a file named' ): __lowerCamelCase : Dict = cached_file(__a , 'conf' ) with open(os.path.join(__a , 'refs' , 'main' ) ) as f: __lowerCamelCase : Any = f.read() self.assertTrue(os.path.isfile(os.path.join(__a , '.no_exist' , __a , 'conf' ) ) ) __lowerCamelCase : str = cached_file(__a , 'conf' , _raise_exceptions_for_missing_entries=__a ) self.assertIsNone(__a ) __lowerCamelCase : int = cached_file(__a , 'conf' , local_files_only=__a , _raise_exceptions_for_missing_entries=__a ) self.assertIsNone(__a ) __lowerCamelCase : List[str] = mock.Mock() __lowerCamelCase : str = 500 __lowerCamelCase : Union[str, Any] = {} __lowerCamelCase : Dict = HTTPError __lowerCamelCase : Any = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch('requests.Session.request' , return_value=__a ) as mock_head: __lowerCamelCase : Any = cached_file(__a , 'conf' , _raise_exceptions_for_connection_errors=__a ) self.assertIsNone(__a ) # This check we did call the fake head request mock_head.assert_called() def snake_case_ ( self ): self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , __a ) ) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , __a ) ) self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , __a ) ) def snake_case_ ( self ): # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(__a , 'is not a valid model identifier' ): get_file_from_repo('bert-base-case' , __a ) # The function raises if the revision does not exist. with self.assertRaisesRegex(__a , 'is not a valid git identifier' ): get_file_from_repo('bert-base-cased' , __a , revision='ahaha' ) __lowerCamelCase : Optional[int] = get_file_from_repo('bert-base-cased' , __a ) # The name is the cached name which is not very easy to test, so instead we load the content. __lowerCamelCase : int = json.loads(open(__a , 'r' ).read() ) self.assertEqual(config['hidden_size'] , 768 ) def snake_case_ ( self ): with tempfile.TemporaryDirectory() as tmp_dir: __lowerCamelCase : int = Path(__a ) / 'a.txt' filename.touch() self.assertEqual(get_file_from_repo(__a , 'a.txt' ) , str(__a ) ) self.assertIsNone(get_file_from_repo(__a , 'b.txt' ) )
263
0
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) def UpperCAmelCase__ ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Union[str, Any] ): __a : List[Any] = nn.functional.normalize(_a ) __a : int = nn.functional.normalize(_a ) return torch.mm(_a , normalized_text_embeds.t() ) class _UpperCamelCase( SCREAMING_SNAKE_CASE_ ): __SCREAMING_SNAKE_CASE : List[Any] = CLIPConfig __SCREAMING_SNAKE_CASE : Union[str, Any] = ['CLIPEncoderLayer'] def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : CLIPConfig ): '''simple docstring''' super().__init__(SCREAMING_SNAKE_CASE__ ) __a : Any = CLIPVisionModel(config.vision_config ) __a : Dict = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=SCREAMING_SNAKE_CASE__ ) __a : int = nn.Parameter(torch.ones(1_7 , config.projection_dim ) , requires_grad=SCREAMING_SNAKE_CASE__ ) __a : Optional[Any] = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=SCREAMING_SNAKE_CASE__ ) __a : List[Any] = nn.Parameter(torch.ones(1_7 ) , requires_grad=SCREAMING_SNAKE_CASE__ ) __a : List[str] = nn.Parameter(torch.ones(3 ) , requires_grad=SCREAMING_SNAKE_CASE__ ) @torch.no_grad() def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ): '''simple docstring''' __a : Union[str, Any] = self.vision_model(SCREAMING_SNAKE_CASE__ )[1] # pooled_output __a : Any = self.visual_projection(SCREAMING_SNAKE_CASE__ ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __a : List[str] = cosine_distance(SCREAMING_SNAKE_CASE__ , self.special_care_embeds ).cpu().float().numpy() __a : List[str] = cosine_distance(SCREAMING_SNAKE_CASE__ , self.concept_embeds ).cpu().float().numpy() __a : Optional[Any] = [] __a : Any = image_embeds.shape[0] for i in range(SCREAMING_SNAKE_CASE__ ): __a : 
Optional[int] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images __a : Optional[Any] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): __a : Optional[Any] = special_cos_dist[i][concept_idx] __a : Tuple = self.special_care_embeds_weights[concept_idx].item() __a : str = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} ) __a : List[str] = 0.01 for concept_idx in range(len(cos_dist[0] ) ): __a : Dict = cos_dist[i][concept_idx] __a : str = self.concept_embeds_weights[concept_idx].item() __a : Tuple = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(SCREAMING_SNAKE_CASE__ ) result.append(SCREAMING_SNAKE_CASE__ ) __a : Any = [len(res['bad_concepts'] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : torch.FloatTensor ): '''simple docstring''' __a : Tuple = self.vision_model(SCREAMING_SNAKE_CASE__ )[1] # pooled_output __a : int = self.visual_projection(SCREAMING_SNAKE_CASE__ ) __a : Any = cosine_distance(SCREAMING_SNAKE_CASE__ , self.special_care_embeds ) __a : Optional[int] = cosine_distance(SCREAMING_SNAKE_CASE__ , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images __a : int = 0.0 __a : List[str] = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) __a : int = torch.any(special_scores > 0 , dim=1 ) __a : int = special_care * 0.01 __a : int = special_adjustment.unsqueeze(1 
).expand(-1 , cos_dist.shape[1] ) __a : Tuple = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) __a : Optional[Any] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
47
"""simple docstring""" def snake_case ( _a: int )-> int: '''simple docstring''' if not isinstance(_a , _a ): raise ValueError('Input must be an integer' ) if input_num <= 0: raise ValueError('Input must be positive' ) return sum( divisor for divisor in range(1 , input_num // 2 + 1 ) if input_num % divisor == 0 ) if __name__ == "__main__": import doctest doctest.testmod()
510
0
'''simple docstring''' import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness lowerCAmelCase_ : Union[str, Any] = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ lowerCAmelCase_ : Tuple = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ lowerCAmelCase_ : Dict = """ Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. 
Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ lowerCAmelCase_ : Optional[Any] = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". 
Within Python you can to this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ lowerCAmelCase_ : str = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE ( datasets.Metric ): '''simple docstring''' def snake_case__ ( self : Dict ) ->Tuple: '''simple docstring''' return datasets.MetricInfo( # This is the description that will appear on the metrics page. 
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , ) def snake_case__ ( self : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : Union[str, Any]=[1, 10, 100] , lowercase__ : Optional[int]=4 , lowercase__ : Any=3.0 ) ->Tuple: '''simple docstring''' if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("This metric is currently not supported on Windows." ) with ThreadPoolExecutor(max_workers=lowerCamelCase_ ) as executor: _UpperCamelCase : Optional[int] = [] _UpperCamelCase : Optional[int] = Counter() _UpperCamelCase : int = 0 _UpperCamelCase : List[Any] = defaultdict(lowerCamelCase_ ) for task_id, (candidates, test_case) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ ) ): for candidate in candidates: _UpperCamelCase : Any = candidate + """\n""" + test_case _UpperCamelCase : Optional[int] = (test_program, timeout, task_id, completion_id[task_id]) _UpperCamelCase : str = executor.submit(lowerCamelCase_ , *lowerCamelCase_ ) futures.append(lowerCamelCase_ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(lowerCamelCase_ ): _UpperCamelCase : Any = future.result() results[result["task_id"]].append((result["completion_id"], result) ) _UpperCamelCase : Tuple = [], [] for result in results.values(): result.sort() _UpperCamelCase : Tuple = [r[1]["""passed"""] for r in result] total.append(len(lowerCamelCase_ ) ) correct.append(sum(lowerCamelCase_ ) ) _UpperCamelCase : Tuple = np.array(lowerCamelCase_ ) _UpperCamelCase : Optional[Any] = np.array(lowerCamelCase_ ) _UpperCamelCase : Dict = k 
_UpperCamelCase : List[Any] = {f'''pass@{k}''': estimate_pass_at_k(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> int: '''simple docstring''' def estimator(UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) ) if isinstance(lowerCamelCase_ ,lowerCamelCase_ ): _UpperCamelCase : List[str] = itertools.repeat(lowerCamelCase_ ,len(lowerCamelCase_ ) ) else: assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) _UpperCamelCase : Any = iter(lowerCamelCase_ ) return np.array([estimator(int(lowerCamelCase_ ) ,int(lowerCamelCase_ ) ,lowerCamelCase_ ) for n, c in zip(lowerCamelCase_ ,lowerCamelCase_ )] )
713
'''simple docstring''' from __future__ import annotations lowerCAmelCase_ : Optional[Any] = """Muhammad Umer Farooq""" lowerCAmelCase_ : str = """MIT""" lowerCAmelCase_ : Optional[Any] = """1.0.0""" lowerCAmelCase_ : Union[str, Any] = """Muhammad Umer Farooq""" lowerCAmelCase_ : Any = """contact@muhammadumerfarooq.me""" lowerCAmelCase_ : Dict = """Alpha""" import re from html.parser import HTMLParser from urllib import parse import requests class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self : List[Any] , lowercase__ : str ) ->None: '''simple docstring''' super().__init__() _UpperCamelCase : list[str] = [] _UpperCamelCase : int = domain def snake_case__ ( self : str , lowercase__ : str , lowercase__ : list[tuple[str, str | None]] ) ->None: '''simple docstring''' if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: _UpperCamelCase : Optional[Any] = parse.urljoin(self.domain , lowercase__ ) self.urls.append(lowercase__ ) def __A ( UpperCAmelCase ) -> str: '''simple docstring''' return ".".join(get_sub_domain_name(UpperCAmelCase ).split("." )[-2:] ) def __A ( UpperCAmelCase ) -> str: '''simple docstring''' return parse.urlparse(UpperCAmelCase ).netloc def __A ( UpperCAmelCase = "https://github.com" ) -> list[str]: '''simple docstring''' _UpperCamelCase : int = get_domain_name(UpperCAmelCase ) # Initialize the parser _UpperCamelCase : Any = Parser(UpperCAmelCase ) try: # Open URL _UpperCamelCase : Union[str, Any] = requests.get(UpperCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through _UpperCamelCase : int = set() for link in parser.urls: # open URL. # read = requests.get(link) try: _UpperCamelCase : Dict = requests.get(UpperCAmelCase ) # Get the valid email. 
_UpperCamelCase : List[str] = re.findall("[a-zA-Z0-9]+@" + domain ,read.text ) # If not in list then append it. for email in emails: valid_emails.add(UpperCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(UpperCAmelCase ) if __name__ == "__main__": lowerCAmelCase_ : List[str] = emails_from_url("""https://github.com""") print(f"""{len(emails)} emails found:""") print("""\n""".join(sorted(emails)))
204
0
'''simple docstring''' from functools import lru_cache @lru_cache def __snake_case ( lowerCamelCase_ : int ): '''simple docstring''' if num < 0: raise ValueError("Number should not be negative." ) return 1 if num in (0, 1) else num * factorial(num - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
664
'''simple docstring''' from __future__ import annotations def __snake_case ( lowerCamelCase_ : list[int] , lowerCamelCase_ : int ): '''simple docstring''' if len(lowerCamelCase_ ) < k or k < 0: raise ValueError("Invalid Input" ) __magic_name__ = __magic_name__ = sum(array[:k] ) for i in range(len(lowerCamelCase_ ) - k ): __magic_name__ = current_sum - array[i] + array[i + k] __magic_name__ = max(lowerCamelCase_ , lowerCamelCase_ ) return max_sum if __name__ == "__main__": from doctest import testmod from random import randint testmod() __magic_name__ : List[str] =[randint(-10_00, 10_00) for i in range(1_00)] __magic_name__ : List[str] =randint(0, 1_10) print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
664
1
from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class UpperCamelCase_ : pass
719
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel UpperCAmelCase_ = logging.getLogger(__name__) def lowerCamelCase__ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] ) -> Tuple: '''simple docstring''' if os.path.exists(UpperCamelCase__ ): if os.path.exists(os.path.join(UpperCamelCase__ , 'config.json' ) ) and os.path.isfile( os.path.join(UpperCamelCase__ , 'config.json' ) ): os.remove(os.path.join(UpperCamelCase__ , 'config.json' ) ) if os.path.exists(os.path.join(UpperCamelCase__ , 'pytorch_model.bin' ) ) and os.path.isfile( os.path.join(UpperCamelCase__ , 'pytorch_model.bin' ) ): os.remove(os.path.join(UpperCamelCase__ , 'pytorch_model.bin' ) ) else: os.makedirs(UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) def lowerCamelCase__ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict=False ) -> str: '''simple docstring''' _snake_case = 2 if unlogit: _snake_case = torch.pow(UpperCamelCase__ , UpperCamelCase__ ) _snake_case = p * torch.log(UpperCamelCase__ ) _snake_case = 0 return -plogp.sum(dim=-1 ) def lowerCamelCase__ ( UpperCamelCase__ : Dict ) -> Optional[Any]: '''simple docstring''' logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(UpperCamelCase__ ) ) ) ) for row in range(len(UpperCamelCase__ ) ): if tensor.dtype != torch.long: logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) ) def lowerCamelCase__ ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Any=True , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[int]=False ) -> Any: 
'''simple docstring''' _snake_case , _snake_case = model.config.num_hidden_layers, model.config.num_attention_heads _snake_case = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) _snake_case = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) if head_mask is None: _snake_case = torch.ones(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) head_mask.requires_grad_(requires_grad=UpperCamelCase__ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _snake_case = None _snake_case = 0.0 _snake_case = 0.0 for step, inputs in enumerate(tqdm(UpperCamelCase__ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ): _snake_case = tuple(t.to(args.device ) for t in inputs ) ((_snake_case) , ) = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _snake_case = model(UpperCamelCase__ , labels=UpperCamelCase__ , head_mask=UpperCamelCase__ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _snake_case , _snake_case , _snake_case = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(UpperCamelCase__ ): _snake_case = entropy(attn.detach() , UpperCamelCase__ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(UpperCamelCase__ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _snake_case = 2 _snake_case = torch.pow(torch.pow(UpperCamelCase__ , UpperCamelCase__ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20 if not 
args.dont_normalize_global_importance: _snake_case = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('Attention entropies' ) print_ad_tensor(UpperCamelCase__ ) if compute_importance: logger.info('Head importance scores' ) print_ad_tensor(UpperCamelCase__ ) logger.info('Head ranked by importance scores' ) _snake_case = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _snake_case = torch.arange( head_importance.numel() , device=args.device ) _snake_case = head_ranks.view_as(UpperCamelCase__ ) print_ad_tensor(UpperCamelCase__ ) return attn_entropy, head_importance, total_loss def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ) -> List[str]: '''simple docstring''' _snake_case , _snake_case , _snake_case = compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ ) _snake_case = 1 / loss # instead of downsteam score use the LM loss logger.info('Pruning: original score: %f, threshold: %f' , UpperCamelCase__ , original_score * args.masking_threshold ) _snake_case = torch.ones_like(UpperCamelCase__ ) _snake_case = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _snake_case = original_score while current_score >= original_score * args.masking_threshold: _snake_case = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _snake_case = float('Inf' ) _snake_case = head_importance.view(-1 ).sort()[1] if len(UpperCamelCase__ ) <= num_to_mask: print('BREAK BY num_to_mask' ) break # mask heads _snake_case = current_heads_to_mask[:num_to_mask] logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) ) _snake_case = new_head_mask.view(-1 ) _snake_case = 0.0 _snake_case = new_head_mask.view_as(UpperCamelCase__ ) _snake_case = new_head_mask.clone().detach() 
print_ad_tensor(UpperCamelCase__ ) # Compute metric and head importance again _snake_case , _snake_case , _snake_case = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , head_mask=UpperCamelCase__ ) _snake_case = 1 / loss logger.info( 'Masking: current score: %f, remaining heads %d (%.1f percents)' , UpperCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('Final head mask' ) print_ad_tensor(UpperCamelCase__ ) np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() ) return head_mask def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> List[Any]: '''simple docstring''' _snake_case = datetime.now() _snake_case , _snake_case , _snake_case = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ ) _snake_case = 1 / loss _snake_case = datetime.now() - before_time _snake_case = sum(p.numel() for p in model.parameters() ) _snake_case = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) ) } for k, v in heads_to_prune.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): _snake_case = [ v, ] assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(UpperCamelCase__ ) _snake_case = sum(p.numel() for p in model.parameters() ) _snake_case = datetime.now() _snake_case , _snake_case , _snake_case = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ , actually_pruned=UpperCamelCase__ , ) _snake_case = 1 / loss _snake_case = datetime.now() - before_time logger.info( 
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , UpperCamelCase__ , UpperCamelCase__ , pruned_num_params / original_num_params * 100 , ) logger.info('Pruning: score with masking: %f score with pruning: %f' , UpperCamelCase__ , UpperCamelCase__ ) logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 ) save_model(UpperCamelCase__ , args.output_dir ) def lowerCamelCase__ ( ) -> str: '''simple docstring''' _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--data_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , ) parser.add_argument( '--model_name_or_path' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models' , ) parser.add_argument( '--output_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The output directory where the model predictions and checkpoints will be written.' , ) # Other parameters parser.add_argument( '--config_name' , default='' , type=UpperCamelCase__ , help='Pretrained config name or path if not the same as model_name_or_path' , ) parser.add_argument( '--tokenizer_name' , default='' , type=UpperCamelCase__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , ) parser.add_argument( '--cache_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='Where do you want to store the pre-trained models downloaded from s3' , ) parser.add_argument( '--data_subset' , type=UpperCamelCase__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' 
) parser.add_argument( '--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' ) parser.add_argument( '--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' ) parser.add_argument( '--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' ) parser.add_argument( '--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , ) parser.add_argument( '--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' ) parser.add_argument( '--masking_threshold' , default=0.9 , type=UpperCamelCase__ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , ) parser.add_argument( '--masking_amount' , default=0.1 , type=UpperCamelCase__ , help='Amount to heads to masking at each masking step.' ) parser.add_argument('--metric_name' , default='acc' , type=UpperCamelCase__ , help='Metric to use for head masking.' ) parser.add_argument( '--max_seq_length' , default=128 , type=UpperCamelCase__ , help=( 'The maximum total input sequence length after WordPiece tokenization. \n' 'Sequences longer than this will be truncated, sequences shorter padded.' ) , ) parser.add_argument('--batch_size' , default=1 , type=UpperCamelCase__ , help='Batch size.' ) parser.add_argument('--seed' , type=UpperCamelCase__ , default=42 ) parser.add_argument('--local_rank' , type=UpperCamelCase__ , default=-1 , help='local_rank for distributed training on gpus' ) parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' ) parser.add_argument('--server_ip' , type=UpperCamelCase__ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=UpperCamelCase__ , default='' , help='Can be used for distant debugging.' 
) _snake_case = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _snake_case = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' ) _snake_case = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _snake_case = torch.device('cuda' , args.local_rank ) _snake_case = 1 torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _snake_case = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _snake_case = nn.parallel.DistributedDataParallel( UpperCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCamelCase__ ) elif args.n_gpu > 1: _snake_case = nn.DataParallel(UpperCamelCase__ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(args.output_dir , 'run_args.bin' ) ) logger.info('Training/evaluation parameters %s' , UpperCamelCase__ ) # Prepare dataset _snake_case = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _snake_case = (torch.from_numpy(UpperCamelCase__ ),) _snake_case = TensorDataset(*UpperCamelCase__ ) _snake_case = RandomSampler(UpperCamelCase__ ) _snake_case = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , 
batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _snake_case = mask_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) prune_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
541
0
import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class SCREAMING_SNAKE_CASE : '''simple docstring''' def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]=13 , UpperCAmelCase_ : List[Any]=32 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Any=[1, 2, 1] , UpperCAmelCase_ : Any=[2, 2, 4] , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Union[str, Any]=2.0 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : int=1E-5 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[Any]=10 , UpperCAmelCase_ : Optional[Any]=8 , UpperCAmelCase_ : Dict=["stage1", "stage2", "stage3"] , UpperCAmelCase_ : Optional[int]=[1, 2, 3] , ): SCREAMING_SNAKE_CASE : Any = parent SCREAMING_SNAKE_CASE : Optional[int] = batch_size SCREAMING_SNAKE_CASE : List[str] = image_size SCREAMING_SNAKE_CASE : Optional[Any] = patch_size SCREAMING_SNAKE_CASE : List[str] = num_channels SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim SCREAMING_SNAKE_CASE : Tuple = depths 
SCREAMING_SNAKE_CASE : List[str] = num_heads SCREAMING_SNAKE_CASE : Any = window_size SCREAMING_SNAKE_CASE : List[Any] = mlp_ratio SCREAMING_SNAKE_CASE : Optional[int] = qkv_bias SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[str] = drop_path_rate SCREAMING_SNAKE_CASE : int = hidden_act SCREAMING_SNAKE_CASE : int = use_absolute_embeddings SCREAMING_SNAKE_CASE : Optional[int] = patch_norm SCREAMING_SNAKE_CASE : Dict = layer_norm_eps SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range SCREAMING_SNAKE_CASE : Union[str, Any] = is_training SCREAMING_SNAKE_CASE : str = scope SCREAMING_SNAKE_CASE : Dict = use_labels SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size SCREAMING_SNAKE_CASE : Optional[Any] = encoder_stride SCREAMING_SNAKE_CASE : int = out_features SCREAMING_SNAKE_CASE : int = out_indices def _A ( self : Optional[Any] ): SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE : List[Any] = None if self.use_labels: SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : Optional[Any] = self.get_config() return config, pixel_values, labels def _A ( self : List[str] ): return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , 
encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _A ( self : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ): SCREAMING_SNAKE_CASE : Dict = MaskFormerSwinModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE : Tuple = model(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) SCREAMING_SNAKE_CASE : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _A ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ): SCREAMING_SNAKE_CASE : int = MaskFormerSwinBackbone(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE : str = model(UpperCAmelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [16, 32, 64] ) # verify ValueError with self.parent.assertRaises(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[Any] = ["stem"] SCREAMING_SNAKE_CASE : List[str] = MaskFormerSwinBackbone(config=UpperCAmelCase_ ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = config_and_inputs SCREAMING_SNAKE_CASE : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase_ : Optional[Any] = ( ( 
MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) UpperCamelCase_ : Tuple = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {} UpperCamelCase_ : Any = False UpperCamelCase_ : Tuple = False UpperCamelCase_ : int = False UpperCamelCase_ : int = False UpperCamelCase_ : int = False def _A ( self : List[str] ): SCREAMING_SNAKE_CASE : List[str] = MaskFormerSwinModelTester(self ) SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , embed_dim=37 ) @require_torch_multi_gpu @unittest.skip( reason=( "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with" " `nn.DataParallel`" ) ) def _A ( self : Optional[Any] ): pass def _A ( self : Union[str, Any] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _A ( self : Union[str, Any] ): return def _A ( self : Dict ): SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def _A ( self : Any ): SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*UpperCAmelCase_ ) @unittest.skip("Swin does not use inputs_embeds" ) def _A ( self : Tuple ): pass @unittest.skip("Swin does not support feedforward chunking" ) def _A ( self : Any ): pass def _A ( self : int ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : List[str] = model_class(UpperCAmelCase_ ) 
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) SCREAMING_SNAKE_CASE : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) ) def _A ( self : List[str] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Tuple = model_class(UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCAmelCase_ ) @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" ) def _A ( self : Optional[Any] ): pass @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" ) def _A ( self : List[str] ): pass def _A ( self : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple ): SCREAMING_SNAKE_CASE : Optional[Any] = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() with torch.no_grad(): SCREAMING_SNAKE_CASE : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) ) SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states SCREAMING_SNAKE_CASE : List[Any] = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) # Swin has a different seq_length SCREAMING_SNAKE_CASE : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , 
[num_patches, self.model_tester.embed_dim] , ) def _A ( self : List[Any] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Dict = True self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Optional[int] = True self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : List[str] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE : Optional[Any] = 3 SCREAMING_SNAKE_CASE : Dict = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) SCREAMING_SNAKE_CASE : List[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) SCREAMING_SNAKE_CASE : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) SCREAMING_SNAKE_CASE : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Optional[Any] = True self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE : Any = True self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , 
(padded_height, padded_width) ) @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" ) def _A ( self : List[Any] ): pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _A ( self : int ): pass @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" ) def _A ( self : Optional[int] ): pass def _A ( self : Optional[int] ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(UpperCAmelCase_ : Any ): SCREAMING_SNAKE_CASE : Optional[Any] = 0 return t def check_equivalence(UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]={} ): with torch.no_grad(): SCREAMING_SNAKE_CASE : List[Any] = model(**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , **UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Optional[Any] = model(**UpperCAmelCase_ , return_dict=UpperCAmelCase_ , **UpperCAmelCase_ ).to_tuple() def recursive_check(UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple ): if isinstance(UpperCAmelCase_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase_ , UpperCAmelCase_ ): recursive_check(UpperCAmelCase_ , UpperCAmelCase_ ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(UpperCAmelCase_ , UpperCAmelCase_ ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(UpperCAmelCase_ ) , set_nan_tensor_to_zero(UpperCAmelCase_ ) , atol=1E-5 ) , msg=( "Tuple and dict output are not equal. Difference:" f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:''' f''' {torch.isnan(UpperCAmelCase_ ).any()} and `inf`: {torch.isinf(UpperCAmelCase_ )}. 
Dict has''' f''' `nan`: {torch.isnan(UpperCAmelCase_ ).any()} and `inf`: {torch.isinf(UpperCAmelCase_ )}.''' ) , ) recursive_check(UpperCAmelCase_ , UpperCAmelCase_ ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE : Any = model_class(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() SCREAMING_SNAKE_CASE : Tuple = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) check_equivalence(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ ) check_equivalence(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : List[str] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) check_equivalence(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , {"output_hidden_states": True} ) SCREAMING_SNAKE_CASE : int = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_ ) check_equivalence(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , {"output_hidden_states": True} ) @require_torch class SCREAMING_SNAKE_CASE ( unittest.TestCase , lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : List[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else () UpperCamelCase_ : Optional[int] = MaskFormerSwinConfig def _A ( self : str ): SCREAMING_SNAKE_CASE : str = MaskFormerSwinModelTester(self ) def _A ( self : Dict ): SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE : int = inputs_dict["pixel_values"].shape[0] for backbone_class in self.all_model_classes: SCREAMING_SNAKE_CASE : List[Any] = backbone_class(UpperCAmelCase_ ) backbone.to(UpperCAmelCase_ ) backbone.eval() SCREAMING_SNAKE_CASE : Union[str, Any] = backbone(**UpperCAmelCase_ ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , UpperCAmelCase_ ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True SCREAMING_SNAKE_CASE : Optional[int] = backbone(**UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: SCREAMING_SNAKE_CASE : Any = backbone(**UpperCAmelCase_ , output_attentions=UpperCAmelCase_ ) self.assertIsNotNone(outputs.attentions )
62
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline


if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    """Element-wise logistic sigmoid of the raw logits (numpy array in, numpy array out)."""
    # Fix: the parameter and the name used in the body must match (`_outputs`).
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    """Numerically stable softmax over the last axis of the raw logits."""
    # Subtract the row-wise max before exponentiating to avoid overflow.
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    """Post-processing functions that can be applied to the model logits."""

    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        return_all_scores (`bool`, *optional*, defaults to `False`):
            Whether to return all prediction scores or just the one of the predicted class.
        function_to_apply (`str`, *optional*, defaults to `"default"`):
            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:

            - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
              has several labels, will apply the softmax function on the output.
            - `"sigmoid"`: Applies the sigmoid function on the output.
            - `"softmax"`: Applies the softmax function on the output.
            - `"none"`: Does not apply any function on the output.
    """,
)
class TextClassificationPipeline(Pipeline):
    """
    Text classification pipeline: tokenizes the input text(s), runs a sequence-classification
    model, and converts the logits into `{"label": ..., "score": ...}` dicts.
    """

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Restrict usage to sequence-classification architectures for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        """Split the user kwargs into (preprocess, forward, postprocess) parameter dicts.

        `top_k=""` is a sentinel meaning "not passed by the user" — `None` is a valid
        user value (return all labels), so it cannot serve as the default.
        """
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        """Classify the given text(s); see the pipeline docstring for the accepted inputs."""
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize a string, a `{"text": ..., "text_pair": ...}` dict, or the legacy pair format."""
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                " dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair."
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs."""
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        """Turn raw logits into label/score dicts.

        When `function_to_apply` is not given, it is inferred from the model config
        (sigmoid for multi-label / single-logit models, softmax otherwise).
        """
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f'Unrecognized `function_to_apply` argument: {function_to_apply}')

        if top_k == 1 and _legacy:
            # Backward-compatible shape: a single dict for the best label.
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            # Fix: the mangled source sorted with an unbound lambda parameter; sort by score, descending.
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
468
0
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( _snake_case , unittest.TestCase): __lowercase : Optional[Any] = LayoutLMTokenizer __lowercase : List[Any] = LayoutLMTokenizerFast __lowercase : str = True __lowercase : List[str] = True def lowerCamelCase__ ( self ): super().setUp() _snake_case : Dict = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] _snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def lowerCamelCase__ ( self , **snake_case_ ): return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case_ ) def lowerCamelCase__ ( self , snake_case_ ): _snake_case : Dict = "UNwant\u00E9d,running" _snake_case : List[Any] = "unwanted, running" return input_text, output_text def lowerCamelCase__ ( self ): _snake_case : Tuple = self.tokenizer_class(self.vocab_file ) _snake_case : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [7, 4, 5, 10, 8, 9] ) def lowerCamelCase__ ( self ): pass
702
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( _snake_case , unittest.TestCase): __lowercase : Any = TextToVideoSDPipeline __lowercase : str = TEXT_TO_IMAGE_PARAMS __lowercase : int = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. __lowercase : Optional[int] = frozenset( [ """num_inference_steps""", """generator""", """latents""", """return_dict""", """callback""", """callback_steps""", ]) def lowerCamelCase__ ( self ): torch.manual_seed(0 ) _snake_case : str = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , ) _snake_case : List[Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , ) torch.manual_seed(0 ) _snake_case : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) _snake_case : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , 
hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="gelu" , projection_dim=5_12 , ) _snake_case : Tuple = CLIPTextModel(snake_case_ ) _snake_case : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) _snake_case : Any = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def lowerCamelCase__ ( self , snake_case_ , snake_case_=0 ): if str(snake_case_ ).startswith("mps" ): _snake_case : str = torch.manual_seed(snake_case_ ) else: _snake_case : Union[str, Any] = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) _snake_case : str = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def lowerCamelCase__ ( self ): _snake_case : int = "cpu" # ensure determinism for the device-dependent torch.Generator _snake_case : Optional[Any] = self.get_dummy_components() _snake_case : Tuple = TextToVideoSDPipeline(**snake_case_ ) _snake_case : List[str] = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) _snake_case : int = self.get_dummy_inputs(snake_case_ ) _snake_case : Union[str, Any] = "np" _snake_case : Dict = sd_pipe(**snake_case_ ).frames _snake_case : Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _snake_case : Dict = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def lowerCamelCase__ ( self ): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=3E-3 ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def lowerCamelCase__ ( self ): 
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case_ , expected_max_diff=1E-2 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def lowerCamelCase__ ( self ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." ) def lowerCamelCase__ ( self ): pass def lowerCamelCase__ ( self ): return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase): def lowerCamelCase__ ( self ): _snake_case : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" ) _snake_case : int = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) _snake_case : Tuple = pipe.to("cuda" ) _snake_case : List[Any] = "Spiderman is surfing" _snake_case : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : int = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=25 , output_type="pt" ).frames _snake_case : int = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5E-2 def lowerCamelCase__ ( self ): _snake_case : Any = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) _snake_case : str = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" ) _snake_case : int = pipe.to("cuda" ) _snake_case : Any = "Spiderman is surfing" _snake_case : str = torch.Generator(device="cpu" ).manual_seed(0 ) _snake_case : Any = pipe(snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="pt" ).frames _snake_case : Optional[int] = video_frames.cpu().numpy() assert np.abs(expected_video - video 
).mean() < 5E-2
87
0
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """
    Feature-extraction pipeline: tokenizes the input text, runs the base model, and returns
    the first output tensor (logits or last hidden state) as nested lists or raw tensors.
    """

    # Fix: the mangled source used the same name (`_lowercase`) for every parameter,
    # which is a SyntaxError, and returned `preprocess_params`/`postprocess_params`
    # that were never bound. Canonical parameter names restored.
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split user kwargs into (preprocess, forward, postprocess) parameter dicts."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize the raw text into framework-native tensors."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """Return the first model output, optionally converted to nested Python lists."""
        # model_outputs[0] is the first available tensor (logits or last_hidden_state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Extract features for the given text(s)."""
        return super().__call__(*args, **kwargs)
5
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

# Fix: `GPTaLMHeadModel` was a digit-corrupted identifier; the real class is GPT2LMHeadModel.
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    """Save `model` to `dirpath`, removing any stale config/weights files first."""
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis.

    When `unlogit` is True, `p` is first squared (attention probabilities come in
    already normalized; squaring sharpens them before the entropy computation).
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # 0 * log(0) -> nan; define it as 0 explicitly.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Log a 2D tensor, one row per layer, tab-separated."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))


def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute per-head attention entropy and head importance scores.

    Importance is the accumulated absolute gradient of the loss w.r.t. the head mask
    (Michel et al., http://arxiv.org/abs/1905.10650). Returns
    (attn_entropy, head_importance, total_loss).
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # Fix: the mangled source dropped this indexed assignment; rank heads by descending importance.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss


def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads while the LM score stays above
    `masking_threshold * original_score`. Returns the final head mask."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        # Fix: the mangled source lost this indexed assignment (already-masked heads
        # must be pushed to the end of the sort with an infinite importance).
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask


def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads' weights and compare score/params/speed
    against the masked-only model."""
    # Try pruning and test time speedup
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            # squeeze() collapses a single pruned head to a bare int; normalize to a list.
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)


def main():
    """Parse CLI args, load GPT-2, compute head importance, then optionally mask and prune."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    # Fix: `np.intaa` was a digit-corrupted dtype name (np.int64).
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
47
0
from typing import List, Union import numpy as np from ..tokenization_utils import TruncationStrategy from ..utils import add_end_docstrings, logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline _snake_case = logging.get_logger(__name__) class lowerCAmelCase ( lowercase_ ): def UpperCAmelCase ( self :List[Any] , _lowercase :List[Any] ): '''simple docstring''' if isinstance(_lowercase , _lowercase ): lowercase__ = [label.strip() for label in labels.split("," ) if label.strip()] return labels def __call__( self :Union[str, Any] , _lowercase :str , _lowercase :Optional[int] , _lowercase :List[Any] ): '''simple docstring''' if len(_lowercase ) == 0 or len(_lowercase ) == 0: raise ValueError("You must include at least one label and at least one sequence." ) if hypothesis_template.format(labels[0] ) == hypothesis_template: raise ValueError( ( "The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. " "Make sure the passed template includes formatting syntax such as {{}} where the label should go." ).format(_lowercase ) ) if isinstance(_lowercase , _lowercase ): lowercase__ = [sequences] lowercase__ = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(_lowercase )] for label in labels] ) return sequence_pairs, sequences @add_end_docstrings(lowercase_ ) class lowerCAmelCase ( lowercase_ ): def __init__( self :List[Any] , _lowercase :List[str]=ZeroShotClassificationArgumentHandler() , *_lowercase :List[Any] , **_lowercase :Union[str, Any] ): '''simple docstring''' lowercase__ = args_parser super().__init__(*_lowercase , **_lowercase ) if self.entailment_id == -1: logger.warning( "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to " "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." 
) @property def UpperCAmelCase ( self :Tuple ): '''simple docstring''' for label, ind in self.model.config.labelaid.items(): if label.lower().startswith("entail" ): return ind return -1 def UpperCAmelCase ( self :str , _lowercase :List[str] , _lowercase :Union[str, Any]=True , _lowercase :int=True , _lowercase :List[str]=TruncationStrategy.ONLY_FIRST , **_lowercase :int ): '''simple docstring''' lowercase__ = self.framework if self.tokenizer.pad_token is None: # Override for tokenizers not supporting padding logger.error( "Tokenizer was not supporting padding necessary for zero-shot, attempting to use " " `pad_token=eos_token`" ) lowercase__ = self.tokenizer.eos_token try: lowercase__ = self.tokenizer( _lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , padding=_lowercase , truncation=_lowercase , ) except Exception as e: if "too short" in str(_lowercase ): # tokenizers might yell that we want to truncate # to a value that is not even reached by the input. # In that case we don't want to truncate. # It seems there's not a really better way to catch that # exception. lowercase__ = self.tokenizer( _lowercase , add_special_tokens=_lowercase , return_tensors=_lowercase , padding=_lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , ) else: raise e return inputs def UpperCAmelCase ( self :Optional[Any] , **_lowercase :Optional[Any] ): '''simple docstring''' if kwargs.get("multi_class" , _lowercase ) is not None: lowercase__ = kwargs["multi_class"] logger.warning( "The `multi_class` argument has been deprecated and renamed to `multi_label`. " "`multi_class` will be removed in a future version of Transformers." 
) lowercase__ = {} if "candidate_labels" in kwargs: lowercase__ = self._args_parser._parse_labels(kwargs["candidate_labels"] ) if "hypothesis_template" in kwargs: lowercase__ = kwargs["hypothesis_template"] lowercase__ = {} if "multi_label" in kwargs: lowercase__ = kwargs["multi_label"] return preprocess_params, {}, postprocess_params def __call__( self :Optional[Any] , _lowercase :Union[str, List[str]] , *_lowercase :Any , **_lowercase :Tuple , ): '''simple docstring''' if len(_lowercase ) == 0: pass elif len(_lowercase ) == 1 and "candidate_labels" not in kwargs: lowercase__ = args[0] else: raise ValueError(f'''Unable to understand extra arguments {args}''' ) return super().__call__(_lowercase , **_lowercase ) def UpperCAmelCase ( self :List[str] , _lowercase :List[str] , _lowercase :List[str]=None , _lowercase :Tuple="This example is {}." ): '''simple docstring''' lowercase__ , lowercase__ = self._args_parser(_lowercase , _lowercase , _lowercase ) for i, (candidate_label, sequence_pair) in enumerate(zip(_lowercase , _lowercase ) ): lowercase__ = self._parse_and_tokenize([sequence_pair] ) yield { "candidate_label": candidate_label, "sequence": sequences[0], "is_last": i == len(_lowercase ) - 1, **model_input, } def UpperCAmelCase ( self :Tuple , _lowercase :int ): '''simple docstring''' lowercase__ = inputs["candidate_label"] lowercase__ = inputs["sequence"] lowercase__ = {k: inputs[k] for k in self.tokenizer.model_input_names} lowercase__ = self.model(**_lowercase ) lowercase__ = { "candidate_label": candidate_label, "sequence": sequence, "is_last": inputs["is_last"], **outputs, } return model_outputs def UpperCAmelCase ( self :Optional[Any] , _lowercase :Dict , _lowercase :Optional[Any]=False ): '''simple docstring''' lowercase__ = [outputs["candidate_label"] for outputs in model_outputs] lowercase__ = [outputs["sequence"] for outputs in model_outputs] lowercase__ = np.concatenate([output["logits"].numpy() for output in model_outputs] ) lowercase__ = 
logits.shape[0] lowercase__ = len(_lowercase ) lowercase__ = N // n lowercase__ = logits.reshape((num_sequences, n, -1) ) if multi_label or len(_lowercase ) == 1: # softmax over the entailment vs. contradiction dim for each label independently lowercase__ = self.entailment_id lowercase__ = -1 if entailment_id == 0 else 0 lowercase__ = reshaped_outputs[..., [contradiction_id, entailment_id]] lowercase__ = np.exp(_lowercase ) / np.exp(_lowercase ).sum(-1 , keepdims=_lowercase ) lowercase__ = scores[..., 1] else: # softmax the "entailment" logits over all candidate labels lowercase__ = reshaped_outputs[..., self.entailment_id] lowercase__ = np.exp(_lowercase ) / np.exp(_lowercase ).sum(-1 , keepdims=_lowercase ) lowercase__ = list(reversed(scores[0].argsort() ) ) return { "sequence": sequences[0], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[0, top_inds].tolist(), }
715
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""", # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class lowerCAmelCase ( lowercase_ ): __lowerCamelCase = 'convbert' def __init__( self :List[Any] , _lowercase :int=3_05_22 , _lowercase :List[Any]=7_68 , _lowercase :List[str]=12 , _lowercase :Tuple=12 , _lowercase :Optional[int]=30_72 , _lowercase :List[Any]="gelu" , _lowercase :str=0.1 , _lowercase :Optional[int]=0.1 , _lowercase :Optional[int]=5_12 , _lowercase :Optional[Any]=2 , _lowercase :List[str]=0.02 , _lowercase :Optional[Any]=1e-12 , _lowercase :Optional[Any]=1 , _lowercase :Tuple=0 , _lowercase :Optional[int]=2 , _lowercase :Optional[Any]=7_68 , _lowercase :str=2 , _lowercase :Optional[Any]=9 , _lowercase :List[Any]=1 , _lowercase :Tuple=None , **_lowercase :Optional[int] , ): '''simple docstring''' super().__init__( pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , ) lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = embedding_size lowercase__ = head_ratio lowercase__ = conv_kernel_size lowercase__ = num_groups lowercase__ 
= classifier_dropout class lowerCAmelCase ( lowercase_ ): @property def UpperCAmelCase ( self :Optional[Any] ): '''simple docstring''' if self.task == "multiple-choice": lowercase__ = {0: "batch", 1: "choice", 2: "sequence"} else: lowercase__ = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
611
0
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class A ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): '''simple docstring''' A__ = IFPipeline A__ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} A__ = TEXT_TO_IMAGE_BATCH_PARAMS A__ = PipelineTesterMixin.required_optional_params - {'''latents'''} def lowerCamelCase__ (self : int ) -> Optional[int]: """simple docstring""" return self._get_dummy_components() def lowerCamelCase__ (self : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : str=0 ) -> Optional[Any]: """simple docstring""" if str(_UpperCAmelCase ).startswith("""mps""" ): lowercase__ = torch.manual_seed(_UpperCAmelCase ) else: lowercase__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) lowercase__ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def lowerCamelCase__ (self : int ) -> List[Any]: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" ) def lowerCamelCase__ (self : Dict ) -> List[Any]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1 ) def lowerCamelCase__ (self : str ) -> int: """simple docstring""" 
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def lowerCamelCase__ (self : int ) -> Any: """simple docstring""" self._test_save_load_local() def lowerCamelCase__ (self : str ) -> Optional[Any]: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def lowerCamelCase__ (self : Optional[int] ) -> Optional[int]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @slow @require_torch_gpu class A ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase__ (self : Tuple ) -> str: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ (self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa ) lowercase__ = IFSuperResolutionPipeline.from_pretrained( """DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=_UpperCAmelCase , tokenizer=_UpperCAmelCase ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("""cuda""" ) lowercase__ , lowercase__ = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() lowercase__ = None lowercase__ = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img lowercase__ = IFImgaImgPipeline(**pipe_a.components ) lowercase__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) 
pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting lowercase__ = IFInpaintingPipeline(**pipe_a.components ) lowercase__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> List[Any]: """simple docstring""" _start_torch_memory_measurement() lowercase__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase__ = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="""np""" , ) lowercase__ = output.images[0] assert image.shape == (64, 64, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 lowercase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() lowercase__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) lowercase__ = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ) lowercase__ = output.images[0] assert image.shape == (256, 256, 3) 
lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowercase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] ) -> Any: """simple docstring""" _start_torch_memory_measurement() lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) lowercase__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase__ = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="""np""" , ) lowercase__ = output.images[0] assert image.shape == (64, 64, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowercase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() lowercase__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) lowercase__ = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ) lowercase__ = output.images[0] assert image.shape == (256, 256, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowercase__ = load_numpy( 
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase__ (self : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str ) -> Dict: """simple docstring""" _start_torch_memory_measurement() lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_UpperCAmelCase ) lowercase__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase__ = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , num_inference_steps=2 , generator=_UpperCAmelCase , output_type="""np""" , ) lowercase__ = output.images[0] assert image.shape == (64, 64, 3) lowercase__ = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 lowercase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) # pipeline 2 _start_torch_memory_measurement() lowercase__ = torch.Generator(device="""cpu""" ).manual_seed(0 ) lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_UpperCAmelCase ) lowercase__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_UpperCAmelCase ) lowercase__ = pipe_a( prompt_embeds=_UpperCAmelCase , negative_prompt_embeds=_UpperCAmelCase , image=_UpperCAmelCase , mask_image=_UpperCAmelCase , original_image=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=2 , output_type="""np""" , ) lowercase__ = output.images[0] assert image.shape == (256, 256, 3) lowercase__ = 
torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 lowercase__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" ) assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase ) def UpperCamelCase ( ) -> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
15
from math import log from scipy.constants import Boltzmann, physical_constants A : Any = 3_0_0 # TEMPERATURE (unit = K) def UpperCamelCase ( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , ) -> float: """simple docstring""" if donor_conc <= 0: raise ValueError("""Donor concentration should be positive""" ) elif acceptor_conc <= 0: raise ValueError("""Acceptor concentration should be positive""" ) elif intrinsic_conc <= 0: raise ValueError("""Intrinsic concentration should be positive""" ) elif donor_conc <= intrinsic_conc: raise ValueError( """Donor concentration should be greater than intrinsic concentration""" ) elif acceptor_conc <= intrinsic_conc: raise ValueError( """Acceptor concentration should be greater than intrinsic concentration""" ) else: return ( Boltzmann * T * log((donor_conc * acceptor_conc) / intrinsic_conc**2 ) / physical_constants["electron volt"][0] ) if __name__ == "__main__": import doctest doctest.testmod()
15
1
from .testing import ( are_the_same_tensors, execute_subprocess_async, require_bnb, require_cpu, require_cuda, require_huggingface_suite, require_mps, require_multi_gpu, require_multi_xpu, require_safetensors, require_single_gpu, require_single_xpu, require_torch_min_version, require_tpu, require_xpu, skip, slow, ) from .training import RegressionDataset, RegressionModel, RegressionModelaXPU from .scripts import test_script, test_sync, test_ops # isort: skip
704
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset _snake_case = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCAmelCase_ ( nn.Module ): """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE ) -> Any: super().__init__() __UpperCamelCase = torchvision.models.resnetaaa(pretrained=_SCREAMING_SNAKE_CASE ) __UpperCamelCase = list(model.children() )[:-2] __UpperCamelCase = nn.Sequential(*_SCREAMING_SNAKE_CASE ) __UpperCamelCase = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def __lowercase( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]: # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 __UpperCamelCase = self.pool(self.model(_SCREAMING_SNAKE_CASE ) ) __UpperCamelCase = torch.flatten(_SCREAMING_SNAKE_CASE , start_dim=2 ) __UpperCamelCase = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCAmelCase_ ( _lowercase ): """simple docstring""" def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int: __UpperCamelCase = [json.loads(_SCREAMING_SNAKE_CASE ) for l in open(_SCREAMING_SNAKE_CASE )] __UpperCamelCase = os.path.dirname(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = tokenizer __UpperCamelCase = labels __UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) __UpperCamelCase = max_seq_length __UpperCamelCase = transforms def __len__( self ) -> Tuple: return len(self.data ) def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: __UpperCamelCase = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=_SCREAMING_SNAKE_CASE ) ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = sentence[0], sentence[1:-1], sentence[-1] __UpperCamelCase = sentence[: self.max_seq_length] 
__UpperCamelCase = torch.zeros(self.n_classes ) __UpperCamelCase = 1 __UpperCamelCase = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' ) __UpperCamelCase = self.transforms(_SCREAMING_SNAKE_CASE ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def __lowercase( self ) -> Tuple: __UpperCamelCase = Counter() for row in self.data: label_freqs.update(row['label'] ) return label_freqs def _a ( __lowercase ) -> int: """simple docstring""" __UpperCamelCase = [len(row['sentence'] ) for row in batch] __UpperCamelCase , __UpperCamelCase = len(__lowercase ), max(__lowercase ) __UpperCamelCase = torch.zeros(__lowercase , __lowercase , dtype=torch.long ) __UpperCamelCase = torch.zeros(__lowercase , __lowercase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(__lowercase , __lowercase ) ): __UpperCamelCase = input_row['sentence'] __UpperCamelCase = 1 __UpperCamelCase = torch.stack([row['image'] for row in batch] ) __UpperCamelCase = torch.stack([row['label'] for row in batch] ) __UpperCamelCase = torch.stack([row['image_start_token'] for row in batch] ) __UpperCamelCase = torch.stack([row['image_end_token'] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def _a ( ) -> Dict: """simple docstring""" return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def _a ( ) -> Any: """simple docstring""" return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ), ] )
567
0
from unittest.mock import Mock, patch from file_transfer.send_file import send_file @patch("""socket.socket""" ) @patch("""builtins.open""" ) def __lowerCamelCase ( A__ : Any , A__ : Optional[int] ) -> List[Any]: lowerCamelCase_ : List[str] = Mock() lowerCamelCase_ : Union[str, Any] = conn, Mock() lowerCamelCase_ : Union[str, Any] = iter([1, None] ) lowerCamelCase_ : int = lambda A__ : next(__lowerCAmelCase ) # ===== invoke ===== send_file(filename="""mytext.txt""" , testing=__lowerCAmelCase ) # ===== ensurance ===== sock.assert_called_once() sock.return_value.bind.assert_called_once() sock.return_value.listen.assert_called_once() sock.return_value.accept.assert_called_once() conn.recv.assert_called_once() file.return_value.__enter__.assert_called_once() file.return_value.__enter__.return_value.read.assert_called() conn.send.assert_called_once() conn.close.assert_called_once() sock.return_value.shutdown.assert_called_once() sock.return_value.close.assert_called_once()
278
"""simple docstring""" import os from distutils.util import strtobool def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]: '''simple docstring''' for e in env_keys: lowerCamelCase__ =int(os.environ.get(__lowerCAmelCase , -1 ) ) if val >= 0: return val return default def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase=False ) -> List[str]: '''simple docstring''' lowerCamelCase__ =os.environ.get(__lowerCAmelCase , str(__lowerCAmelCase ) ) return strtobool(__lowerCAmelCase ) == 1 # As its name indicates `strtobool` actually returns an int... def lowerCamelCase_ ( __lowerCAmelCase , __lowerCAmelCase="no" ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase__ =os.environ.get(__lowerCAmelCase , str(__lowerCAmelCase ) ) return value
530
0
"""simple docstring""" from .data_collator import ( DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeqaSeq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, DefaultDataCollator, default_data_collator, ) from .metrics import glue_compute_metrics, xnli_compute_metrics from .processors import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, )
51
"""simple docstring""" def lowercase__ ( lowercase_ ) -> set: """simple docstring""" _UpperCamelCase : Union[str, Any] = set() # edges = list of graph's edges _UpperCamelCase : Union[str, Any] = get_edges(lowercase_ ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: _UpperCamelCase, _UpperCamelCase : str = edges.pop() chosen_vertices.add(lowercase_ ) chosen_vertices.add(lowercase_ ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(lowercase_ ) return chosen_vertices def lowercase__ ( lowercase_ ) -> set: """simple docstring""" _UpperCamelCase : List[str] = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
51
1
"""simple docstring""" import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=None ): __lowercase : Optional[Any] = None if token is not None: __lowercase : Tuple = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""} __lowercase : List[str] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100""" __lowercase : Optional[Any] = requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json() __lowercase : Optional[Any] = {} try: job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) __lowercase : Dict = math.ceil((result['''total_count'''] - 1_00) / 1_00 ) for i in range(__UpperCamelCase ): __lowercase : str = requests.get(url + f"""&page={i + 2}""" , headers=__UpperCamelCase ).json() job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return job_links except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=None ): __lowercase : Any = None if token is not None: __lowercase : Optional[Any] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""} __lowercase : int = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100""" __lowercase : List[Any] = requests.get(__UpperCamelCase , headers=__UpperCamelCase ).json() __lowercase : Union[str, Any] = {} try: artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) __lowercase : Dict = math.ceil((result['''total_count'''] - 1_00) / 1_00 ) for i in range(__UpperCamelCase ): __lowercase : Dict = requests.get(url + f"""&page={i + 2}""" , 
headers=__UpperCamelCase ).json() artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} ) return artifacts except Exception: print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" ) return {} def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : Optional[int] = None if token is not None: __lowercase : Union[str, Any] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""} __lowercase : Any = requests.get(__UpperCamelCase , headers=__UpperCamelCase , allow_redirects=__UpperCamelCase ) __lowercase : Optional[int] = result.headers['''Location'''] __lowercase : List[str] = requests.get(__UpperCamelCase , allow_redirects=__UpperCamelCase ) __lowercase : List[str] = os.path.join(__UpperCamelCase , f"""{artifact_name}.zip""" ) with open(__UpperCamelCase , '''wb''' ) as fp: fp.write(response.content ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=None ): __lowercase : Any = [] __lowercase : List[Any] = [] __lowercase : Dict = None with zipfile.ZipFile(__UpperCamelCase ) as z: for filename in z.namelist(): if not os.path.isdir(__UpperCamelCase ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(__UpperCamelCase ) as f: for line in f: __lowercase : Optional[Any] = line.decode('''UTF-8''' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs __lowercase : Any = line[: line.index(''': ''' )] __lowercase : str = line[line.index(''': ''' ) + len(''': ''' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ): # `test` is the test method that failed __lowercase : Dict = line[len('''FAILED ''' ) :] failed_tests.append(__UpperCamelCase ) elif filename == "job_name.txt": __lowercase : 
int = line if len(__UpperCamelCase ) != len(__UpperCamelCase ): raise ValueError( f"""`errors` and `failed_tests` should have the same number of elements. Got {len(__UpperCamelCase )} for `errors` """ f"""and {len(__UpperCamelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some""" ''' problem.''' ) __lowercase : List[str] = None if job_name and job_links: __lowercase : Optional[Any] = job_links.get(__UpperCamelCase , __UpperCamelCase ) # A list with elements of the form (line of error, error, failed test) __lowercase : Any = [x + [y] + [job_link] for x, y in zip(__UpperCamelCase , __UpperCamelCase )] return result def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=None ): __lowercase : Tuple = [] __lowercase : Tuple = [os.path.join(__UpperCamelCase , __UpperCamelCase ) for p in os.listdir(__UpperCamelCase ) if p.endswith('''.zip''' )] for p in paths: errors.extend(get_errors_from_single_artifact(__UpperCamelCase , job_links=__UpperCamelCase ) ) return errors def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=None ): __lowercase : str = Counter() counter.update([x[1] for x in logs] ) __lowercase : Dict = counter.most_common() __lowercase : Optional[int] = {} for error, count in counts: if error_filter is None or error not in error_filter: __lowercase : Tuple = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]} __lowercase : Tuple = dict(sorted(r.items() , key=lambda __UpperCamelCase : item[1]["count"] , reverse=__UpperCamelCase ) ) return r def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : int = test.split('''::''' )[0] if test.startswith('''tests/models/''' ): __lowercase : Optional[Any] = test.split('''/''' )[2] else: __lowercase : List[Any] = None return test def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=None ): __lowercase : Dict = [(x[0], x[1], get_model(x[2] )) for x in logs] __lowercase : List[str] = [x for x in logs if x[2] is not None] __lowercase : 
Dict = {x[2] for x in logs} __lowercase : Union[str, Any] = {} for test in tests: __lowercase : Optional[int] = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) __lowercase : List[str] = counter.most_common() __lowercase : Any = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} __lowercase : Optional[Any] = sum(error_counts.values() ) if n_errors > 0: __lowercase : Optional[Any] = {'''count''': n_errors, '''errors''': error_counts} __lowercase : List[Any] = dict(sorted(r.items() , key=lambda __UpperCamelCase : item[1]["count"] , reverse=__UpperCamelCase ) ) return r def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Union[str, Any] = '''| no. | error | status |''' __lowercase : Dict = '''|-:|:-|:-|''' __lowercase : int = [header, sep] for error in reduced_by_error: __lowercase : int = reduced_by_error[error]['''count'''] __lowercase : Union[str, Any] = f"""| {count} | {error[:1_00]} | |""" lines.append(__UpperCamelCase ) return "\n".join(__UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : str = '''| model | no. 
of errors | major error | count |''' __lowercase : Dict = '''|-:|-:|-:|-:|''' __lowercase : int = [header, sep] for model in reduced_by_model: __lowercase : Union[str, Any] = reduced_by_model[model]['''count'''] __lowercase ,__lowercase : Tuple = list(reduced_by_model[model]['''errors'''].items() )[0] __lowercase : Tuple = f"""| {model} | {count} | {error[:60]} | {_count} |""" lines.append(__UpperCamelCase ) return "\n".join(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') parser.add_argument( '--output_dir', type=str, required=True, help='Where to store the downloaded artifacts and other result files.', ) parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.') a_ = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) a_ = get_job_links(args.workflow_run_id, token=args.token) a_ = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. 
if " / " in k: a_ = k.find(' / ') a_ = k[index + len(' / ') :] a_ = v with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) a_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) a_ = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error a_ = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors a_ = counter.most_common(3_0) for item in most_common: print(item) with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) a_ = reduce_by_error(errors) a_ = reduce_by_model(errors) a_ = make_github_table(reduced_by_error) a_ = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa) with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp: fp.write(sa)
76
import importlib import inspect import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py UpperCamelCase__ : Optional[int] = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. UpperCamelCase__ : int = importlib.util.spec_from_file_location( """transformers""", os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) UpperCamelCase__ : Dict = spec.loader.load_module() UpperCamelCase__ : Optional[int] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` UpperCamelCase__ : Any = re.compile("""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") UpperCamelCase__ : int = { """CLIPConfigMixin""", """DecisionTransformerConfigMixin""", """EncoderDecoderConfigMixin""", """RagConfigMixin""", """SpeechEncoderDecoderConfigMixin""", """VisionEncoderDecoderConfigMixin""", """VisionTextDualEncoderConfigMixin""", } def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]: """simple docstring""" a = [] for config_class in list(CONFIG_MAPPING.values() ): a = False # source code of `config_class` a = inspect.getsource(snake_case_ ) a = _re_checkpoint.findall(snake_case_ ) for checkpoint in checkpoints: # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. 
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` a , a = checkpoint # verify the checkpoint name corresponds to the checkpoint link a = f"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: a = True break a = config_class.__name__ if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(snake_case_ ) if len(snake_case_ ) > 0: a = '''\n'''.join(sorted(snake_case_ ) ) raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
387
0
from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig lowerCamelCase__ = logging.get_logger(__name__) # General docstring lowerCamelCase__ = '''RegNetConfig''' # Base docstring lowerCamelCase__ = '''facebook/regnet-y-040''' lowerCamelCase__ = [1, 1088, 7, 7] # Image classification docstring lowerCamelCase__ = '''facebook/regnet-y-040''' lowerCamelCase__ = '''tabby, tabby cat''' lowerCamelCase__ = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ) -> Tuple: """simple docstring""" super().__init__() _UpperCamelCase = nn.Convad( lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , ) _UpperCamelCase = nn.BatchNormad(lowercase_) _UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity() def __UpperCAmelCase ( self : str , lowercase_ : List[Any]) -> List[str]: """simple docstring""" _UpperCamelCase = self.convolution(lowercase_) _UpperCamelCase = self.normalization(lowercase_) _UpperCamelCase = self.activation(lowercase_) return hidden_state class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] , lowercase_ : 
RegNetConfig) -> str: """simple docstring""" super().__init__() _UpperCamelCase = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act) _UpperCamelCase = config.num_channels def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any) -> Optional[Any]: """simple docstring""" _UpperCamelCase = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration.") _UpperCamelCase = self.embedder(lowercase_) return hidden_state class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2) -> Tuple: """simple docstring""" super().__init__() _UpperCamelCase = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_) _UpperCamelCase = nn.BatchNormad(lowercase_) def __UpperCAmelCase ( self : str , lowercase_ : Tensor) -> Tensor: """simple docstring""" _UpperCamelCase = self.convolution(lowercase_) _UpperCamelCase = self.normalization(lowercase_) return hidden_state class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Dict , lowercase_ : int , lowercase_ : int) -> List[Any]: """simple docstring""" super().__init__() _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1)) _UpperCamelCase = nn.Sequential( nn.Convad(lowercase_ , lowercase_ , kernel_size=1) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1) , nn.Sigmoid() , ) def __UpperCAmelCase ( self : int , lowercase_ : Union[str, Any]) -> Tuple: """simple docstring""" _UpperCamelCase = self.pooler(lowercase_) _UpperCamelCase = self.attention(lowercase_) _UpperCamelCase = hidden_state * attention return hidden_state class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ 
: int = 1) -> Dict: """simple docstring""" super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width) _UpperCamelCase = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_) , ) _UpperCamelCase = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self : List[str] , lowercase_ : str) -> Optional[Any]: """simple docstring""" _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowercase_) _UpperCamelCase = self.shortcut(lowercase_) hidden_state += residual _UpperCamelCase = self.activation(lowercase_) return hidden_state class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Dict , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1) -> List[str]: """simple docstring""" super().__init__() _UpperCamelCase = in_channels != out_channels or stride != 1 _UpperCamelCase = max(1 , out_channels // config.groups_width) _UpperCamelCase = ( RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_) if should_apply_shortcut else nn.Identity() ) _UpperCamelCase = nn.Sequential( RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4))) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_) , ) _UpperCamelCase = ACTaFN[config.hidden_act] def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Tuple) -> Dict: """simple 
docstring""" _UpperCamelCase = hidden_state _UpperCamelCase = self.layer(lowercase_) _UpperCamelCase = self.shortcut(lowercase_) hidden_state += residual _UpperCamelCase = self.activation(lowercase_) return hidden_state class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : Any , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ) -> int: """simple docstring""" super().__init__() _UpperCamelCase = RegNetXLayer if config.layer_type == "x" else RegNetYLayer _UpperCamelCase = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_) for _ in range(depth - 1)] , ) def __UpperCAmelCase ( self : Dict , lowercase_ : List[str]) -> Any: """simple docstring""" _UpperCamelCase = self.layers(lowercase_) return hidden_state class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : List[Any] , lowercase_ : RegNetConfig) -> str: """simple docstring""" super().__init__() _UpperCamelCase = nn.ModuleList([]) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , )) _UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:]) for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:]): self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_)) def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True) -> BaseModelOutputWithNoAttention: """simple docstring""" _UpperCamelCase = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: _UpperCamelCase = hidden_states + 
(hidden_state,) _UpperCamelCase = stage_module(lowercase_) if output_hidden_states: _UpperCamelCase = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None) return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_) class _UpperCAmelCase ( lowerCAmelCase ): '''simple docstring''' __A = RegNetConfig __A = '''regnet''' __A = '''pixel_values''' __A = True def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple) -> List[str]: """simple docstring""" if isinstance(lowercase_ , nn.Convad): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu") elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm)): nn.init.constant_(module.weight , 1) nn.init.constant_(module.bias , 0) def __UpperCAmelCase ( self : Optional[int] , lowercase_ : Dict , lowercase_ : Union[str, Any]=False) -> int: """simple docstring""" if isinstance(lowercase_ , lowercase_): _UpperCamelCase = value lowerCamelCase__ = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' lowerCamelCase__ = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''', lowerCAmelCase, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class _UpperCAmelCase ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Tuple , lowercase_ : Optional[int]) -> Tuple: """simple docstring""" super().__init__(lowercase_) _UpperCamelCase = config _UpperCamelCase = RegNetEmbeddings(lowercase_) _UpperCamelCase = RegNetEncoder(lowercase_) _UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __UpperCAmelCase ( self : Dict , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention: """simple docstring""" _UpperCamelCase = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.embedder(lowercase_) _UpperCamelCase = self.encoder( lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_) _UpperCamelCase = encoder_outputs[0] _UpperCamelCase = self.pooler(lowercase_) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), 
e.g. for ImageNet. ''', lowerCAmelCase, ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class _UpperCAmelCase ( lowerCAmelCase ): '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : Any) -> Any: """simple docstring""" super().__init__(lowercase_) _UpperCamelCase = config.num_labels _UpperCamelCase = RegNetModel(lowercase_) # classification head _UpperCamelCase = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowercase_) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: """simple docstring""" _UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict _UpperCamelCase = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_) _UpperCamelCase = outputs.pooler_output if return_dict else outputs[1] _UpperCamelCase = self.classifier(lowercase_) _UpperCamelCase = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: _UpperCamelCase = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): _UpperCamelCase = "single_label_classification" else: _UpperCamelCase = "multi_label_classification" if self.config.problem_type == "regression": _UpperCamelCase = MSELoss() if self.num_labels == 1: _UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze()) else: 
_UpperCamelCase = loss_fct(lowercase_ , lowercase_) elif self.config.problem_type == "single_label_classification": _UpperCamelCase = CrossEntropyLoss() _UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1)) elif self.config.problem_type == "multi_label_classification": _UpperCamelCase = BCEWithLogitsLoss() _UpperCamelCase = loss_fct(lowercase_ , lowercase_) if not return_dict: _UpperCamelCase = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states)
717
import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch lowerCamelCase__ = logging.get_logger(__name__) class _UpperCAmelCase : '''simple docstring''' def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict: """simple docstring""" if not conversation_id: _UpperCamelCase = uuid.uuida() if past_user_inputs is None: _UpperCamelCase = [] if generated_responses is None: _UpperCamelCase = [] _UpperCamelCase = conversation_id _UpperCamelCase = past_user_inputs _UpperCamelCase = generated_responses _UpperCamelCase = text def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]: """simple docstring""" if not isinstance(lowercase_ , lowercase_): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any: """simple docstring""" if self.new_user_input: if overwrite: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' f'with: "{text}".') _UpperCamelCase = text else: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' f'ignored: "{text}". 
Set `overwrite` to True to overwrite unprocessed user input') else: _UpperCamelCase = text def __UpperCAmelCase ( self : Optional[int]) -> List[Any]: """simple docstring""" if self.new_user_input: self.past_user_inputs.append(self.new_user_input) _UpperCamelCase = None def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]: """simple docstring""" self.generated_responses.append(lowercase_) def __UpperCAmelCase ( self : List[Any]) -> Optional[int]: """simple docstring""" for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : Union[str, Any]) -> int: """simple docstring""" _UpperCamelCase = f'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): _UpperCamelCase = "user" if is_user else "bot" output += f'{name} >> {text} \n' return output @add_end_docstrings( lowerCAmelCase, R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
''', ) class _UpperCAmelCase ( lowerCAmelCase ): '''simple docstring''' def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]: """simple docstring""" super().__init__(*lowercase_ , **lowercase_) if self.tokenizer.pad_token_id is None: _UpperCamelCase = self.tokenizer.eos_token def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple: """simple docstring""" _UpperCamelCase = {} _UpperCamelCase = {} _UpperCamelCase = {} if min_length_for_response is not None: _UpperCamelCase = min_length_for_response if minimum_tokens is not None: _UpperCamelCase = minimum_tokens if "max_length" in generate_kwargs: _UpperCamelCase = generate_kwargs["max_length"] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: _UpperCamelCase = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(lowercase_) return preprocess_params, forward_params, postprocess_params def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_) if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1: return outputs[0] return outputs def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]: """simple docstring""" if not isinstance(lowercase_ , lowercase_): raise ValueError("ConversationalPipeline, expects Conversation as inputs") if conversation.new_user_input is None: raise ValueError( f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. 
' "Add user inputs with the conversation's `add_user_input` method") if hasattr(self.tokenizer , "_build_conversation_input_ids"): _UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_) else: # If the tokenizer cannot handle conversations, we default to only the old version _UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_) if self.framework == "pt": _UpperCamelCase = torch.LongTensor([input_ids]) elif self.framework == "tf": _UpperCamelCase = tf.constant([input_ids]) return {"input_ids": input_ids, "conversation": conversation} def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]: """simple docstring""" _UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length) _UpperCamelCase = model_inputs["input_ids"].shape[1] if max_length - minimum_tokens < n: logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})') _UpperCamelCase = max_length - minimum_tokens _UpperCamelCase = model_inputs["input_ids"][:, -trim:] if "attention_mask" in model_inputs: _UpperCamelCase = model_inputs["attention_mask"][:, -trim:] _UpperCamelCase = model_inputs.pop("conversation") _UpperCamelCase = max_length _UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_) if self.model.config.is_encoder_decoder: _UpperCamelCase = 1 else: _UpperCamelCase = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]: """simple docstring""" _UpperCamelCase = model_outputs["output_ids"] _UpperCamelCase = self.tokenizer.decode( output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , ) _UpperCamelCase = model_outputs["conversation"] conversation.mark_processed() conversation.append_response(lowercase_) return conversation def __UpperCAmelCase ( self 
: Any , lowercase_ : Conversation) -> Dict: """simple docstring""" _UpperCamelCase = self.tokenizer.eos_token_id _UpperCamelCase = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id]) else: input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)) if len(lowercase_) > self.tokenizer.model_max_length: _UpperCamelCase = input_ids[-self.tokenizer.model_max_length :] return input_ids
82
0
import numpy # List of input, output pairs UpperCAmelCase__ = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) UpperCAmelCase__ = (((515, 22, 13), 555), ((61, 35, 49), 150)) UpperCAmelCase__ = [2, 4, 1, 5] UpperCAmelCase__ = len(train_data) UpperCAmelCase__ = 0.009 def _a ( a :int , a :Optional[int]="train" ) -> Any: return calculate_hypothesis_value(a , a ) - output( a , a ) def _a ( a :Tuple ) -> str: a = 0 for i in range(len(a ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def _a ( a :str , a :Optional[Any] ) -> str: if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def _a ( a :Tuple , a :Dict ) -> Dict: if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def _a ( a :Dict , a :Optional[Any]=m ) -> Dict: a = 0 for i in range(a ): if index == -1: summation_value += _error(a ) else: summation_value += _error(a ) * train_data[i][0][index] return summation_value def _a ( a :str ) -> Dict: a = summation_of_cost_derivative(a , a ) / m return cost_derivative_value def _a ( ) -> Optional[Any]: global parameter_vector # Tune these values to set a tolerance value for predicted output a = 0.000_002 a = 0 a = 0 while True: j += 1 a = [0, 0, 0, 0] for i in range(0 , len(a ) ): a = get_cost_derivative(i - 1 ) a = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( a , a , atol=a , rtol=a , ): break a = temp_parameter_vector print(('''Number of iterations:''', j) ) def _a ( ) -> Union[str, Any]: for i in range(len(a ) ): print(('''Actual output value:''', output(a , '''test''' )) ) print(('''Hypothesis output:''', calculate_hypothesis_value(a , '''test''' )) ) if __name__ == "__main__": run_gradient_descent() print("\nTesting gradient descent for a linear 
hypothesis function.\n") test_gradient_descent()
117
from typing import Any import numpy as np def _a ( a :np.ndarray ) -> bool: return np.array_equal(a , matrix.conjugate().T ) def _a ( a :np.ndarray , a :np.ndarray ) -> Any: a = v.conjugate().T a = v_star.dot(a ) assert isinstance(a , np.ndarray ) return (v_star_dot.dot(a )) / (v_star.dot(a )) def _a ( ) -> None: a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) a = np.array([[1], [2], [3]] ) assert is_hermitian(a ), F"""{a} is not hermitian.""" print(rayleigh_quotient(a , a ) ) a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(a ), F"""{a} is not hermitian.""" assert rayleigh_quotient(a , a ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
117
1
"""simple docstring""" a = 8.314_4598 def lowercase (snake_case__ : float , snake_case__ : float ) -> float: '''simple docstring''' if temperature < 0: raise Exception("""Temperature cannot be less than 0 K""" ) if molar_mass <= 0: raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example a = 3_0_0 a = 2_8 a = rms_speed_of_molecule(temperature, molar_mass) print(f"""Vrms of Nitrogen gas at 300 K is {vrms} m/s""")
702
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @property def __lowercase ( self : Any ): torch.manual_seed(0 ) lowerCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def __lowercase ( self : Optional[int] ): lowerCAmelCase = self.dummy_uncond_unet lowerCAmelCase = KarrasVeScheduler() lowerCAmelCase = KarrasVePipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase ) pipe.to(lowerCAmelCase ) pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=2 , generator=lowerCAmelCase , output_type="""numpy""" ).images lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=2 , generator=lowerCAmelCase , output_type="""numpy""" , return_dict=lowerCAmelCase )[0] lowerCAmelCase = image[0, -3:, -3:, -1] lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def __lowercase ( self : str ): lowerCAmelCase = """google/ncsnpp-celebahq-256""" lowerCAmelCase = UNetaDModel.from_pretrained(lowerCAmelCase ) lowerCAmelCase = KarrasVeScheduler() lowerCAmelCase = KarrasVePipeline(unet=lowerCAmelCase , scheduler=lowerCAmelCase ) pipe.to(lowerCAmelCase ) 
pipe.set_progress_bar_config(disable=lowerCAmelCase ) lowerCAmelCase = torch.manual_seed(0 ) lowerCAmelCase = pipe(num_inference_steps=20 , generator=lowerCAmelCase , output_type="""numpy""" ).images lowerCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowerCAmelCase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
529
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class snake_case_ ( __A ): '''simple docstring''' SCREAMING_SNAKE_CASE : str = "dandelin/vilt-b32-finetuned-vqa" SCREAMING_SNAKE_CASE : Any = ( "This is a tool that answers a question about an image. It takes an input named `image` which should be the " "image containing the information, as well as a `question` which should be the question in English. It " "returns a text that is the answer to the question." 
) SCREAMING_SNAKE_CASE : Tuple = "image_qa" SCREAMING_SNAKE_CASE : str = AutoProcessor SCREAMING_SNAKE_CASE : int = AutoModelForVisualQuestionAnswering SCREAMING_SNAKE_CASE : str = ["image", "text"] SCREAMING_SNAKE_CASE : Optional[int] = ["text"] def __init__( self : List[Any] , *_UpperCamelCase : Dict , **_UpperCamelCase : List[Any] ) ->int: requires_backends(self , ['''vision'''] ) super().__init__(*_UpperCamelCase , **_UpperCamelCase ) def snake_case__( self : Optional[int] , _UpperCamelCase : "Image" , _UpperCamelCase : str ) ->Union[str, Any]: return self.pre_processor(_UpperCamelCase , _UpperCamelCase , return_tensors='''pt''' ) def snake_case__( self : Any , _UpperCamelCase : Union[str, Any] ) ->str: with torch.no_grad(): return self.model(**_UpperCamelCase ).logits def snake_case__( self : Union[str, Any] , _UpperCamelCase : Dict ) ->Any: snake_case_ = outputs.argmax(-1 ).item() return self.model.config.idalabel[idx]
39
'''simple docstring''' from __future__ import annotations lowerCAmelCase: str = 'Muhammad Umer Farooq' lowerCAmelCase: List[str] = 'MIT' lowerCAmelCase: Tuple = '1.0.0' lowerCAmelCase: List[Any] = 'Muhammad Umer Farooq' lowerCAmelCase: Optional[Any] = 'contact@muhammadumerfarooq.me' lowerCAmelCase: Dict = 'Alpha' import re from html.parser import HTMLParser from urllib import parse import requests class a__( lowerCamelCase__ ): def __init__( self : Dict , __snake_case : str ): super().__init__() a : list[str] = [] a : List[Any] = domain def lowercase_ ( self : Dict , __snake_case : str , __snake_case : list[tuple[str, str | None]] ): # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: a : Tuple = parse.urljoin(self.domain , __snake_case ) self.urls.append(__snake_case ) def lowerCamelCase__ ( _A ): return ".".join(get_sub_domain_name(_A ).split('.' )[-2:] ) def lowerCamelCase__ ( _A ): return parse.urlparse(_A ).netloc def lowerCamelCase__ ( _A = "https://github.com" ): a : Any = get_domain_name(_A ) # Initialize the parser a : Tuple = Parser(_A ) try: # Open URL a : List[Any] = requests.get(_A ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through a : Union[str, Any] = set() for link in parser.urls: # open URL. # read = requests.get(link) try: a : int = requests.get(_A ) # Get the valid email. a : Optional[Any] = re.findall('[a-zA-Z0-9]+@' + domain , read.text ) # If not in list then append it. for email in emails: valid_emails.add(_A ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. 
return sorted(_A ) if __name__ == "__main__": lowerCAmelCase: Any = emails_from_url('https://github.com') print(F"{len(emails)} emails found:") print('\n'.join(sorted(emails)))
526
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a : Optional[int] = logging.get_logger(__name__) a : Optional[Any] = { '''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''', '''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''', } class _UpperCamelCase ( __UpperCamelCase ): '''simple docstring''' __lowercase : int = 'luke' def __init__( self , __lowercase=50267 , __lowercase=500000 , __lowercase=768 , __lowercase=256 , __lowercase=12 , __lowercase=12 , __lowercase=3072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=2 , __lowercase=0.02 , __lowercase=1e-12 , __lowercase=True , __lowercase=None , __lowercase=1 , __lowercase=0 , __lowercase=2 , **__lowercase , ): super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) UpperCAmelCase__ = vocab_size UpperCAmelCase__ = entity_vocab_size UpperCAmelCase__ = hidden_size UpperCAmelCase__ = entity_emb_size UpperCAmelCase__ = num_hidden_layers UpperCAmelCase__ = num_attention_heads UpperCAmelCase__ = hidden_act UpperCAmelCase__ = intermediate_size UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = max_position_embeddings UpperCAmelCase__ = type_vocab_size UpperCAmelCase__ = initializer_range UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = use_entity_aware_attention UpperCAmelCase__ = classifier_dropout
422
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->Dict: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def snake_case__ ( ) ->Optional[Any]: with parallel_backend("""spark""" ): assert ParallelBackendConfig.backend_name == "spark" UpperCAmelCase__ = [1, 2, 3] with pytest.raises(_SCREAMING_SNAKE_CASE ): with parallel_backend("""unsupported backend""" ): map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=2 ) with pytest.raises(_SCREAMING_SNAKE_CASE ): with parallel_backend("""unsupported backend""" ): map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize("""num_proc""" , [2, -1] ) def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->Tuple: UpperCAmelCase__ = [1, 2] UpperCAmelCase__ = {"""a""": 1, """b""": 2} UpperCAmelCase__ = {"""a""": [1, 2], """b""": [3, 4]} UpperCAmelCase__ = {"""a""": {"""1""": 1}, """b""": 2} UpperCAmelCase__ = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4} UpperCAmelCase__ = [2, 3] UpperCAmelCase__ = {"""a""": 2, """b""": 3} UpperCAmelCase__ = {"""a""": [2, 3], """b""": [4, 5]} UpperCAmelCase__ = {"""a""": {"""1""": 2}, """b""": 3} UpperCAmelCase__ = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5} with parallel_backend("""spark""" ): assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa assert 
map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa assert map_nested(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_proc=_SCREAMING_SNAKE_CASE ) == expected_map_nested_sa
422
1
"""simple docstring""" import torch from diffusers import StableDiffusionPipeline UpperCAmelCase ="path-to-your-trained-model" UpperCAmelCase =StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda") UpperCAmelCase ="A photo of sks dog in a bucket" UpperCAmelCase =pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image.save("dog-bucket.png")
617
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase_ (lowerCamelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = 'facebook/bart-large-mnli' SCREAMING_SNAKE_CASE : Optional[Any] = ( 'This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which ' 'should be the text to classify, and `labels`, which should be the list of labels to use for classification. ' 'It returns the most likely label in the list of provided `labels` for the input text.' 
) SCREAMING_SNAKE_CASE : Any = 'text_classifier' SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer SCREAMING_SNAKE_CASE : Union[str, Any] = AutoModelForSequenceClassification SCREAMING_SNAKE_CASE : Tuple = ['text', ['text']] SCREAMING_SNAKE_CASE : List[str] = ['text'] def SCREAMING_SNAKE_CASE ( self : List[Any] ): super().setup() __lowercase = self.model.config __lowercase = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): __lowercase = int(lowercase__ ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict ,lowercase__ : List[Any] ): __lowercase = labels return self.pre_processor( [text] * len(lowercase__ ) ,[F"This example is {label}" for label in labels] ,return_tensors='''pt''' ,padding='''max_length''' ,) def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : int ): __lowercase = outputs.logits __lowercase = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
41
0
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = '''▁''' __lowerCAmelCase = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __lowerCAmelCase = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } __lowerCAmelCase = { '''facebook/m2m100_418M''': 10_24, } # fmt: off __lowerCAmelCase = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', 
'''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class __a ( __UpperCamelCase ): __lowercase : Tuple = VOCAB_FILES_NAMES __lowercase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase : List[str] = PRETRAINED_VOCAB_FILES_MAP __lowercase : int = ['input_ids', 'attention_mask'] __lowercase : List[int] = [] __lowercase : List[int] = [] def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="m2m100" , lowerCAmelCase__ = None , lowerCAmelCase__=8 , **lowerCAmelCase__ , ) -> None: '''simple docstring''' lowercase__: Dict = {} if sp_model_kwargs is None else sp_model_kwargs lowercase__: Optional[int] = language_codes lowercase__: List[str] = FAIRSEQ_LANGUAGE_CODES[language_codes] lowercase__: str = {lang_code: F'__{lang_code}__' for lang_code in fairseq_language_code} lowercase__: Optional[int] = kwargs.get('additional_special_tokens' , [] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(lowerCAmelCase__ ) for lang_code in fairseq_language_code if self.get_lang_token(lowerCAmelCase__ ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , language_codes=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , 
num_madeup_words=lowerCAmelCase__ , **lowerCAmelCase__ , ) lowercase__: Tuple = vocab_file lowercase__: Optional[Any] = load_json(lowerCAmelCase__ ) lowercase__: Tuple = {v: k for k, v in self.encoder.items()} lowercase__: Union[str, Any] = spm_file lowercase__: List[str] = load_spm(lowerCAmelCase__ , self.sp_model_kwargs ) lowercase__: Any = len(self.encoder ) lowercase__: Optional[Any] = { self.get_lang_token(lowerCAmelCase__ ): self.encoder_size + i for i, lang_code in enumerate(lowerCAmelCase__ ) } lowercase__: Dict = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowerCAmelCase__ )} lowercase__: Dict = {v: k for k, v in self.lang_token_to_id.items()} lowercase__: str = src_lang if src_lang is not None else 'en' lowercase__: List[str] = tgt_lang lowercase__: str = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) lowercase__: Dict = num_madeup_words @property def SCREAMING_SNAKE_CASE__ ( self ) -> int: '''simple docstring''' return len(self.encoder ) + len(self.lang_token_to_id ) @property def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> None: '''simple docstring''' lowercase__: Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]: '''simple docstring''' return self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> str: '''simple docstring''' if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(lowerCAmelCase__ , self.encoder[self.unk_token] ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> str: '''simple docstring''' if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(lowerCAmelCase__ , self.unk_token ) def SCREAMING_SNAKE_CASE__ ( self , 
lowerCAmelCase__ ) -> Optional[Any]: '''simple docstring''' lowercase__: int = [] lowercase__: Tuple = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(lowerCAmelCase__ ) + token lowercase__: List[str] = [] else: current_sub_tokens.append(lowerCAmelCase__ ) out_string += self.sp_model.decode(lowerCAmelCase__ ) return out_string.strip() def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ ) lowercase__: Dict = [1] * len(self.prefix_tokens ) lowercase__: Dict = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(lowerCAmelCase__ )) + suffix_ones return prefix_ones + ([0] * len(lowerCAmelCase__ )) + ([0] * len(lowerCAmelCase__ )) + suffix_ones def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' lowercase__: str = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Dict: '''simple docstring''' lowercase__: Optional[int] = self.__dict__.copy() lowercase__: List[str] = None return state def __setstate__( self , lowerCAmelCase__ ) -> None: '''simple docstring''' lowercase__: int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowercase__: List[str] 
= {} lowercase__: Tuple = load_spm(self.spm_file , self.sp_model_kwargs ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]: '''simple docstring''' lowercase__: Optional[Any] = Path(lowerCAmelCase__ ) if not save_dir.is_dir(): raise OSError(F'{save_directory} should be a directory' ) lowercase__: Tuple = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) lowercase__: Tuple = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , lowerCAmelCase__ ) if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , lowerCAmelCase__ ) elif not os.path.isfile(self.spm_file ): with open(lowerCAmelCase__ , 'wb' ) as fi: lowercase__: int = self.sp_model.serialized_model_proto() fi.write(lowerCAmelCase__ ) return (str(lowerCAmelCase__ ), str(lowerCAmelCase__ )) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = "en" , lowerCAmelCase__ = None , lowerCAmelCase__ = "ro" , **lowerCAmelCase__ , ) -> BatchEncoding: '''simple docstring''' lowercase__: Optional[int] = src_lang lowercase__: str = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) lowercase__: List[str] = src_lang lowercase__: List[str] = self(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ ) lowercase__: Dict = self.get_lang_id(lowerCAmelCase__ ) lowercase__: str = tgt_lang_id return inputs def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: 
'''simple docstring''' self.set_src_lang_special_tokens(self.src_lang ) def SCREAMING_SNAKE_CASE__ ( self ) -> str: '''simple docstring''' self.set_tgt_lang_special_tokens(self.tgt_lang ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> None: '''simple docstring''' lowercase__: int = self.get_lang_token(lowerCAmelCase__ ) lowercase__: Tuple = self.lang_token_to_id[lang_token] lowercase__: List[str] = [self.cur_lang_id] lowercase__: int = [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> None: '''simple docstring''' lowercase__: List[Any] = self.get_lang_token(lowerCAmelCase__ ) lowercase__: Tuple = self.lang_token_to_id[lang_token] lowercase__: int = [self.cur_lang_id] lowercase__: List[Any] = [self.eos_token_id] def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> str: '''simple docstring''' return self.lang_code_to_token[lang] def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> int: '''simple docstring''' lowercase__: Optional[Any] = self.get_lang_token(lowerCAmelCase__ ) return self.lang_token_to_id[lang_token] def snake_case_ ( snake_case , snake_case ) -> sentencepiece.SentencePieceProcessor: lowercase__: List[Any] = sentencepiece.SentencePieceProcessor(**snake_case ) spm.Load(str(snake_case ) ) return spm def snake_case_ ( snake_case ) -> Union[Dict, List]: with open(snake_case , 'r' ) as f: return json.load(snake_case ) def snake_case_ ( snake_case , snake_case ) -> None: with open(snake_case , 'w' ) as f: json.dump(snake_case , snake_case , indent=2 )
335
import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class __a ( __UpperCamelCase , unittest.TestCase ): __lowercase : int = DDIMPipeline __lowercase : Dict = UNCONDITIONAL_IMAGE_GENERATION_PARAMS __lowercase : List[str] = PipelineTesterMixin.required_optional_params - { 'num_images_per_prompt', 'latents', 'callback', 'callback_steps', } __lowercase : Optional[int] = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS __lowercase : Any = False def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) lowercase__: Dict = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , ) lowercase__: int = DDIMScheduler() lowercase__: List[Any] = {'unet': unet, 'scheduler': scheduler} return components def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> List[str]: '''simple docstring''' if str(lowerCAmelCase__ ).startswith('mps' ): lowercase__: Any = torch.manual_seed(lowerCAmelCase__ ) else: lowercase__: Any = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) lowercase__: Any = { 'batch_size': 1, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' lowercase__: int = 'cpu' lowercase__: List[str] = self.get_dummy_components() lowercase__: Union[str, Any] = self.pipeline_class(**lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) 
lowercase__: List[str] = self.get_dummy_inputs(lowerCAmelCase__ ) lowercase__: str = pipe(**lowerCAmelCase__ ).images lowercase__: List[str] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 32, 32, 3) ) lowercase__: Optional[Any] = np.array( [1.000E00, 5.717E-01, 4.717E-01, 1.000E00, 0.000E00, 1.000E00, 3.000E-04, 0.000E00, 9.000E-04] ) lowercase__: int = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCAmelCase__ , 1E-3 ) def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' super().test_save_load_local(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]: '''simple docstring''' super().test_save_load_optional_components(expected_max_difference=3E-3 ) def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]: '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class __a ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self ) -> Any: '''simple docstring''' lowercase__: Tuple = 'google/ddpm-cifar10-32' lowercase__: Union[str, Any] = UNetaDModel.from_pretrained(lowerCAmelCase__ ) lowercase__: Optional[Any] = DDIMScheduler() lowercase__: List[str] = DDIMPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) ddim.to(lowerCAmelCase__ ) ddim.set_progress_bar_config(disable=lowerCAmelCase__ ) lowercase__: Optional[Any] = torch.manual_seed(0 ) lowercase__: str = ddim(generator=lowerCAmelCase__ , eta=0.0 , output_type='numpy' ).images lowercase__: Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowercase__: Optional[Any] = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def SCREAMING_SNAKE_CASE__ ( self ) -> 
Optional[int]: '''simple docstring''' lowercase__: Tuple = 'google/ddpm-ema-bedroom-256' lowercase__: int = UNetaDModel.from_pretrained(lowerCAmelCase__ ) lowercase__: Tuple = DDIMScheduler.from_pretrained(lowerCAmelCase__ ) lowercase__: Any = DDIMPipeline(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ ) ddpm.to(lowerCAmelCase__ ) ddpm.set_progress_bar_config(disable=lowerCAmelCase__ ) lowercase__: Optional[int] = torch.manual_seed(0 ) lowercase__: Tuple = ddpm(generator=lowerCAmelCase__ , output_type='numpy' ).images lowercase__: str = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) lowercase__: List[str] = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
335
1
from __future__ import annotations import math from collections.abc import Callable def lowerCamelCase ( UpperCamelCase : Callable[[int | float], int | float] , UpperCamelCase : int | float , UpperCamelCase : int | float , UpperCamelCase : int = 1_00 , ) -> float: _lowerCamelCase = x_start _lowerCamelCase = fnc(UpperCamelCase ) _lowerCamelCase = 0.0 for _ in range(UpperCamelCase ): # Approximates curve as a sequence of linear lines and sums their length _lowerCamelCase = (x_end - x_start) / steps + xa _lowerCamelCase = fnc(UpperCamelCase ) length += math.hypot(xa - xa , fxa - fxa ) # Increment step _lowerCamelCase = xa _lowerCamelCase = fxa return length if __name__ == "__main__": def lowerCamelCase ( UpperCamelCase : Tuple ) -> Optional[Any]: return math.sin(10 * x ) print('f(x) = sin(10 * x)') print('The length of the curve from x = -10 to x = 10 is:') A = 1_0 while i <= 1_0_0_0_0_0: print(F'''With {i} steps: {line_length(f, -1_0, 1_0, i)}''') i *= 1_0
544
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() A = logging.get_logger(__name__) def lowerCamelCase ( UpperCamelCase : Union[str, Any] ) -> Any: # initialize config if "resnet-50" in model_name: _lowerCamelCase = ResNetConfig.from_pretrained('microsoft/resnet-50' ) elif "resnet-101" in model_name: _lowerCamelCase = ResNetConfig.from_pretrained('microsoft/resnet-101' ) else: raise ValueError('Model name should include either resnet50 or resnet101' ) _lowerCamelCase = DetrConfig(use_timm_backbone=UpperCamelCase , backbone_config=UpperCamelCase ) # set label attributes _lowerCamelCase = 'panoptic' in model_name if is_panoptic: _lowerCamelCase = 2_50 else: _lowerCamelCase = 91 _lowerCamelCase = 'huggingface/label-files' _lowerCamelCase = 'coco-detection-id2label.json' _lowerCamelCase = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type='dataset' ) , 'r' ) ) _lowerCamelCase = {int(UpperCamelCase ): v for k, v in idalabel.items()} _lowerCamelCase = idalabel _lowerCamelCase = {v: k for k, v in idalabel.items()} return config, is_panoptic def lowerCamelCase ( UpperCamelCase : Optional[int] ) -> Dict: # here we list all keys to be renamed (original name on the left, our name on the right) _lowerCamelCase = [] # stem # fmt: off rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') ) rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') ) rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') ) rename_keys.append(('backbone.0.body.bn1.running_mean', 
'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') ) rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 
1}.{layer_idx}.bn{i+1}.bias""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""", ) ) rename_keys.append( ( F"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""", F"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""") ) # decoder layers: 2 times output 
projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 
'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) return rename_keys def lowerCamelCase ( UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any ) -> Any: _lowerCamelCase = state_dict.pop(UpperCamelCase ) _lowerCamelCase = val def lowerCamelCase ( UpperCamelCase : List[Any] , UpperCamelCase : List[Any]=False ) -> List[str]: _lowerCamelCase = '' if is_panoptic: _lowerCamelCase = 'detr.' 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _lowerCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) _lowerCamelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase = in_proj_weight[:2_56, :] _lowerCamelCase = in_proj_bias[:2_56] _lowerCamelCase = in_proj_weight[2_56:5_12, :] _lowerCamelCase = in_proj_bias[2_56:5_12] _lowerCamelCase = in_proj_weight[-2_56:, :] _lowerCamelCase = in_proj_bias[-2_56:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention _lowerCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) _lowerCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _lowerCamelCase = in_proj_weight[:2_56, :] _lowerCamelCase = in_proj_bias[:2_56] _lowerCamelCase = in_proj_weight[2_56:5_12, :] _lowerCamelCase = in_proj_bias[2_56:5_12] _lowerCamelCase = in_proj_weight[-2_56:, :] _lowerCamelCase = in_proj_bias[-2_56:] # read in weights + bias of input projection layer of cross-attention _lowerCamelCase = state_dict.pop( F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) _lowerCamelCase = state_dict.pop(F"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict _lowerCamelCase = in_proj_weight_cross_attn[:2_56, :] _lowerCamelCase = in_proj_bias_cross_attn[:2_56] _lowerCamelCase = in_proj_weight_cross_attn[2_56:5_12, :] _lowerCamelCase = 
in_proj_bias_cross_attn[2_56:5_12] _lowerCamelCase = in_proj_weight_cross_attn[-2_56:, :] _lowerCamelCase = in_proj_bias_cross_attn[-2_56:] def lowerCamelCase ( ) -> Tuple: _lowerCamelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg' _lowerCamelCase = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def lowerCamelCase ( UpperCamelCase : int , UpperCamelCase : Dict=None , UpperCamelCase : str=False ) -> Dict: _lowerCamelCase , _lowerCamelCase = get_detr_config(UpperCamelCase ) # load original model from torch hub _lowerCamelCase = { 'detr-resnet-50': 'detr_resnet50', 'detr-resnet-101': 'detr_resnet101', } logger.info(F"""Converting model {model_name}...""" ) _lowerCamelCase = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=UpperCamelCase ).eval() _lowerCamelCase = detr.state_dict() # rename keys for src, dest in create_rename_keys(UpperCamelCase ): if is_panoptic: _lowerCamelCase = 'detr.' + src rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # query, key and value matrices need special treatment read_in_q_k_v(UpperCamelCase , is_panoptic=UpperCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _lowerCamelCase = 'detr.model.' if is_panoptic else 'model.' 
for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): _lowerCamelCase = state_dict.pop(UpperCamelCase ) _lowerCamelCase = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _lowerCamelCase = state_dict.pop(UpperCamelCase ) _lowerCamelCase = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: _lowerCamelCase = state_dict.pop(UpperCamelCase ) _lowerCamelCase = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): _lowerCamelCase = state_dict.pop(UpperCamelCase ) _lowerCamelCase = val # finally, create HuggingFace model and load state dict _lowerCamelCase = DetrForSegmentation(UpperCamelCase ) if is_panoptic else DetrForObjectDetection(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() # verify our conversion on an image _lowerCamelCase = 'coco_panoptic' if is_panoptic else 'coco_detection' _lowerCamelCase = DetrImageProcessor(format=UpperCamelCase ) _lowerCamelCase = processor(images=prepare_img() , return_tensors='pt' ) _lowerCamelCase = encoding['pixel_values'] _lowerCamelCase = detr(UpperCamelCase ) _lowerCamelCase = model(UpperCamelCase ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1e-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , atol=1e-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1e-4 ) print('Looks ok!' 
) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) model.save_pretrained(UpperCamelCase ) processor.save_pretrained(UpperCamelCase ) if push_to_hub: # Upload model and image processor to the hub logger.info('Uploading PyTorch model and image processor to the hub...' ) model.push_to_hub(F"""nielsr/{model_name}""" ) processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": A = argparse.ArgumentParser() parser.add_argument( '--model_name', default='detr-resnet-50', type=str, choices=['detr-resnet-50', 'detr-resnet-101'], help='Name of the DETR model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.') A = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
544
1
'''simple docstring''' import argparse import pathlib import fairseq import torch from fairseq.models.roberta import RobertaModel as FairseqRobertaModel from fairseq.modules import TransformerSentenceEncoderLayer from packaging import version from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.models.roberta.modeling_roberta import RobertaAttention from transformers.utils import logging if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''): raise Exception('''requires fairseq >= 1.0.0a''') logging.set_verbosity_info() _UpperCAmelCase : str = logging.get_logger(__name__) _UpperCAmelCase : str = '''Hello world! cécé herlolip''' def UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ) -> Dict: '''simple docstring''' lowercase =FairseqRobertaModel.from_pretrained(__SCREAMING_SNAKE_CASE ) roberta.eval() # disable dropout lowercase =roberta.model.encoder.sentence_encoder lowercase =XLMRobertaConfig( vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_1_4 , type_vocab_size=1 , layer_norm_eps=1E-5 , ) if classification_head: lowercase =roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0] print('''Our RoBERTa config:''' , __SCREAMING_SNAKE_CASE ) lowercase =XLMRobertaXLForSequenceClassification(__SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(__SCREAMING_SNAKE_CASE ) model.eval() # Now let's copy all the weights. 
# Embeddings lowercase =roberta_sent_encoder.embed_tokens.weight lowercase =roberta_sent_encoder.embed_positions.weight lowercase =torch.zeros_like( model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them. lowercase =roberta_sent_encoder.layer_norm.weight lowercase =roberta_sent_encoder.layer_norm.bias for i in range(config.num_hidden_layers ): # Encoder: start of layer lowercase =model.roberta.encoder.layer[i] lowercase =roberta_sent_encoder.layers[i] lowercase =layer.attention lowercase =roberta_layer.self_attn_layer_norm.weight lowercase =roberta_layer.self_attn_layer_norm.bias # self attention lowercase =layer.attention.self assert ( roberta_layer.self_attn.k_proj.weight.data.shape == roberta_layer.self_attn.q_proj.weight.data.shape == roberta_layer.self_attn.v_proj.weight.data.shape == torch.Size((config.hidden_size, config.hidden_size) ) ) lowercase =roberta_layer.self_attn.q_proj.weight lowercase =roberta_layer.self_attn.q_proj.bias lowercase =roberta_layer.self_attn.k_proj.weight lowercase =roberta_layer.self_attn.k_proj.bias lowercase =roberta_layer.self_attn.v_proj.weight lowercase =roberta_layer.self_attn.v_proj.bias # self-attention output lowercase =layer.attention.output assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape lowercase =roberta_layer.self_attn.out_proj.weight lowercase =roberta_layer.self_attn.out_proj.bias # this one is final layer norm lowercase =roberta_layer.final_layer_norm.weight lowercase =roberta_layer.final_layer_norm.bias # intermediate lowercase =layer.intermediate assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape lowercase =roberta_layer.fca.weight lowercase =roberta_layer.fca.bias # output lowercase =layer.output assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape lowercase =roberta_layer.fca.weight lowercase =roberta_layer.fca.bias # end of layer if classification_head: lowercase 
=roberta.model.classification_heads['''mnli'''].dense.weight lowercase =roberta.model.classification_heads['''mnli'''].dense.bias lowercase =roberta.model.classification_heads['''mnli'''].out_proj.weight lowercase =roberta.model.classification_heads['''mnli'''].out_proj.bias else: # LM Head lowercase =roberta.model.encoder.lm_head.dense.weight lowercase =roberta.model.encoder.lm_head.dense.bias lowercase =roberta.model.encoder.lm_head.layer_norm.weight lowercase =roberta.model.encoder.lm_head.layer_norm.bias lowercase =roberta.model.encoder.lm_head.weight lowercase =roberta.model.encoder.lm_head.bias # Let's check that we get the same results. lowercase =roberta.encode(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1 lowercase =model(__SCREAMING_SNAKE_CASE )[0] if classification_head: lowercase =roberta.model.classification_heads['''mnli'''](roberta.extract_features(__SCREAMING_SNAKE_CASE ) ) else: lowercase =roberta.model(__SCREAMING_SNAKE_CASE )[0] print(our_output.shape , their_output.shape ) lowercase =torch.max(torch.abs(our_output - their_output ) ).item() print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7 lowercase =torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' ) if not success: raise Exception('''Something went wRoNg''' ) pathlib.Path(__SCREAMING_SNAKE_CASE ).mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": _UpperCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''' ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( 
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.''' ) _UpperCAmelCase : str = parser.parse_args() convert_xlm_roberta_xl_checkpoint_to_pytorch( args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head )
701
'''simple docstring''' from __future__ import annotations import numpy as np def UpperCamelCase ( lowercase_ : np.ndarray ) -> tuple[np.ndarray, np.ndarray]: '''simple docstring''' lowercase , lowercase =np.shape(lowercase_ ) if rows != columns: lowercase =( '''\'table\' has to be of square shaped array but got a ''' f'{rows}x{columns} array:\n{table}' ) raise ValueError(lowercase_ ) lowercase =np.zeros((rows, columns) ) lowercase =np.zeros((rows, columns) ) for i in range(lowercase_ ): for j in range(lowercase_ ): lowercase =sum(lower[i][k] * upper[k][j] for k in range(lowercase_ ) ) if upper[j][j] == 0: raise ArithmeticError('''No LU decomposition exists''' ) lowercase =(table[i][j] - total) / upper[j][j] lowercase =1 for j in range(lowercase_ , lowercase_ ): lowercase =sum(lower[i][k] * upper[k][j] for k in range(lowercase_ ) ) lowercase =table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
145
0
"""Adler-32 checksum, as specified in RFC 1950 (the zlib format)."""

__all__ = ["MOD_ADLER", "__snake_case"]

# Largest prime smaller than 2**16; the modulus defined by RFC 1950.
MOD_ADLER = 65521


def __snake_case(plain_text: str) -> int:
    """Return the Adler-32 checksum of ``plain_text``.

    ``a`` is the running sum of character codes (seeded with 1) and ``b``
    is the running sum of the ``a`` values; the result packs ``b`` into
    the high 16 bits and ``a`` into the low 16 bits.

    Args:
        plain_text: The text to checksum; each character contributes
            ``ord(char)`` (matches ``zlib.adler32`` for ASCII input).

    Returns:
        The 32-bit Adler checksum as an int.
    """
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
396
'''simple docstring''' import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : Optional[int] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } _UpperCamelCase : str = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } _UpperCamelCase : int = {'facebook/blenderbot-3B': 1_28} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __snake_case ( ): __UpperCAmelCase = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) __UpperCAmelCase = bs[:] __UpperCAmelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(lowerCAmelCase ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase = [chr(lowerCAmelCase ) for n in cs] return dict(zip(lowerCAmelCase , lowerCAmelCase ) ) def __snake_case ( lowerCAmelCase : List[Any] ): __UpperCAmelCase = set() __UpperCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase = char return pairs class _lowercase( _lowerCamelCase ): """simple docstring""" __lowerCamelCase = VOCAB_FILES_NAMES __lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP __lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowerCamelCase = ['''input_ids''', '''attention_mask'''] def __init__( self: Union[str, Any] ,a: Tuple ,a: Dict ,a: Dict="replace" ,a: int="<s>" ,a: List[str]="</s>" ,a: Any="</s>" ,a: str="<s>" ,a: Dict="<unk>" ,a: Union[str, Any]="<pad>" ,a: Optional[int]="<mask>" ,a: int=False ,**a: int ,): __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else bos_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else eos_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else sep_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else cls_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else unk_token __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __UpperCAmelCase = AddedToken(a ,lstrip=a ,rstrip=a ) if isinstance(a ,a ) else mask_token super().__init__( errors=a ,bos_token=a ,eos_token=a ,unk_token=a ,sep_token=a ,cls_token=a ,pad_token=a ,mask_token=a ,add_prefix_space=a ,**a ,) with open(a ,encoding='utf-8' ) as vocab_handle: __UpperCAmelCase = json.load(a ) __UpperCAmelCase = {v: k for k, v in self.encoder.items()} __UpperCAmelCase = errors # how to handle errors in decoding __UpperCAmelCase = bytes_to_unicode() __UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()} with open(a ,encoding='utf-8' ) as merges_handle: __UpperCAmelCase = merges_handle.read().split('\n' )[1:-1] __UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase = dict(zip(a ,range(len(a ) ) ) ) __UpperCAmelCase = {} __UpperCAmelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def snake_case ( self: Optional[Any] ): return len(self.encoder ) def snake_case ( self: Optional[Any] ): return dict(self.encoder ,**self.added_tokens_encoder ) def snake_case ( self: Optional[int] ,a: Optional[int] ): if token in self.cache: return self.cache[token] __UpperCAmelCase = tuple(a ) __UpperCAmelCase = get_pairs(a ) if not pairs: return token while True: __UpperCAmelCase = min(a ,key=lambda a : self.bpe_ranks.get(a ,float('inf' ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase , __UpperCAmelCase = bigram __UpperCAmelCase = [] __UpperCAmelCase = 0 while i < len(a ): try: __UpperCAmelCase = word.index(a ,a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase = j if word[i] == first and i < len(a 
) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase = tuple(a ) __UpperCAmelCase = new_word if len(a ) == 1: break else: __UpperCAmelCase = get_pairs(a ) __UpperCAmelCase = ' '.join(a ) __UpperCAmelCase = word return word def snake_case ( self: int ,a: str ): __UpperCAmelCase = [] for token in re.findall(self.pat ,a ): __UpperCAmelCase = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a ).split(' ' ) ) return bpe_tokens def snake_case ( self: Optional[Any] ,a: Union[str, Any] ): return self.encoder.get(a ,self.encoder.get(self.unk_token ) ) def snake_case ( self: Any ,a: Union[str, Any] ): return self.decoder.get(a ) def snake_case ( self: Dict ,a: Union[str, Any] ): __UpperCAmelCase = ''.join(a ) __UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors ) return text def snake_case ( self: Optional[Any] ,a: str ,a: Optional[str] = None ): if not os.path.isdir(a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __UpperCAmelCase = os.path.join( a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __UpperCAmelCase = os.path.join( a ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(a ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=a ,ensure_ascii=a ) + '\n' ) __UpperCAmelCase = 0 with open(a ,'w' ,encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda a : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ' Please check that the tokenizer is not corrupted!' 
) __UpperCAmelCase = token_index writer.write(' '.join(a ) + '\n' ) index += 1 return vocab_file, merge_file def snake_case ( self: List[str] ,a: List[int] ,a: Optional[List[int]] = None ,a: bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a ,token_ids_a=a ,already_has_special_tokens=a ) if token_ids_a is None: return [1] + ([0] * len(a )) + [1] return [1] + ([0] * len(a )) + [1, 1] + ([0] * len(a )) + [1] def snake_case ( self: Optional[int] ,a: List[int] ,a: Optional[List[int]] = None ): __UpperCAmelCase = [self.sep_token_id] __UpperCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def snake_case ( self: Dict ,a: List[Any] ,a: Optional[int]=False ,**a: Optional[Any] ): __UpperCAmelCase = kwargs.pop('add_prefix_space' ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(a ) > 0 and not text[0].isspace()): __UpperCAmelCase = ' ' + text return (text, kwargs) def snake_case ( self: Tuple ,a: List[int] ,a: Optional[List[int]] = None ): return token_ids_a + [self.eos_token_id] def snake_case ( self: Any ,a: "Conversation" ): __UpperCAmelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(a ) __UpperCAmelCase = ' '.join(a ) __UpperCAmelCase = self.encode(a ) if len(a ) > self.model_max_length: __UpperCAmelCase = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
396
1
import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import BatchEncoding, MarianTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available if is_sentencepiece_available(): from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin __magic_name__ = get_tests_dir('''fixtures/test_sentencepiece.model''') __magic_name__ = {'''target_lang''': '''fi''', '''source_lang''': '''en'''} __magic_name__ = '''>>zh<<''' __magic_name__ = '''Helsinki-NLP/''' if is_torch_available(): __magic_name__ = '''pt''' elif is_tf_available(): __magic_name__ = '''tf''' else: __magic_name__ = '''jax''' @require_sentencepiece class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = MarianTokenizer __UpperCAmelCase : List[str] = False __UpperCAmelCase : List[str] = True def _UpperCamelCase ( self ): super().setUp() lowerCamelCase_ : Tuple = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] lowerCamelCase_ : List[Any] = dict(zip(_a , range(len(_a ) ) ) ) lowerCamelCase_ : List[Any] = Path(self.tmpdirname ) save_json(_a , save_dir / VOCAB_FILES_NAMES["vocab"] ) save_json(_a , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] ) if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists(): copyfile(_a , save_dir / VOCAB_FILES_NAMES["source_spm"] ) copyfile(_a , save_dir / VOCAB_FILES_NAMES["target_spm"] ) lowerCamelCase_ : List[Any] = MarianTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCamelCase ( self , **a_ ): return MarianTokenizer.from_pretrained(self.tmpdirname , **_a ) def _UpperCamelCase ( self , a_ ): return ( "This is a test", "This is a test", ) def _UpperCamelCase ( self ): lowerCamelCase_ 
: Optional[Any] = "</s>" lowerCamelCase_ : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(_a ) , 9 ) def _UpperCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 9 ) def _UpperCamelCase ( self ): lowerCamelCase_ : Union[str, Any] = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" ) lowerCamelCase_ : Tuple = en_de_tokenizer(["I am a small frog"] , return_tensors=_a ) self.assertIsInstance(_a , _a ) lowerCamelCase_ : List[str] = [38, 121, 14, 697, 3_8848, 0] self.assertListEqual(_a , batch.input_ids[0] ) lowerCamelCase_ : Optional[Any] = tempfile.mkdtemp() en_de_tokenizer.save_pretrained(_a ) lowerCamelCase_ : Tuple = [x.name for x in Path(_a ).glob("*" )] self.assertIn("source.spm" , _a ) MarianTokenizer.from_pretrained(_a ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[Any] = self.get_tokenizer() lowerCamelCase_ : str = tok( ["I am a small frog" * 1000, "I am a small frog"] , padding=_a , truncation=_a , return_tensors=_a ) self.assertIsInstance(_a , _a ) self.assertEqual(batch.input_ids.shape , (2, 512) ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = self.get_tokenizer() lowerCamelCase_ : List[Any] = tok(["I am a tiny frog", "I am a small frog"] , padding=_a , return_tensors=_a ) self.assertIsInstance(_a , _a ) self.assertEqual(batch_smaller.input_ids.shape , (2, 10) ) @slow def _UpperCamelCase ( self ): # fmt: off lowerCamelCase_ : List[Any] = {"input_ids": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 
270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_a , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" ) lowerCamelCase_ : Optional[int] = "Tämä on testi" lowerCamelCase_ : Tuple = "This is a test" lowerCamelCase_ : int = [76, 7, 2047, 2] lowerCamelCase_ : Optional[int] = [69, 12, 11, 940, 2] lowerCamelCase_ : Optional[int] = tokenizer(_a ).input_ids self.assertListEqual(_a , _a ) lowerCamelCase_ : Optional[int] = tokenizer(text_target=_a ).input_ids self.assertListEqual(_a , _a ) lowerCamelCase_ : Any = tokenizer.decode(_a , skip_special_tokens=_a ) self.assertEqual(_a , _a )
721
# Flax (JAX) port of a Karras-VE stochastic sampler scheduler.
# NOTE(review): this cell appears machine-obfuscated — the three classes share
# the placeholder name `lowerCAmelCase__`, dataclass fields share
# `__UpperCAmelCase`, and every method parameter is `a_` (duplicate parameter
# names are a SyntaxError), so the file is not runnable as written. Bodies
# read variables (`sigma`, `sample_hat`, `derivative`, …) whose assignments
# were all collapsed onto `lowerCamelCase_`. Comments below describe apparent
# intent only — TODO: confirm against upstream diffusers
# `scheduling_karras_ve_flax.py` before relying on any of it.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax.numpy as jnp
from jax import random

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin


@flax.struct.dataclass
class lowerCAmelCase__ :
    """simple docstring"""

    # setable values
    # NOTE(review): three distinct state fields were all renamed to
    # `__UpperCAmelCase` by the obfuscator; only the last binding survives.
    __UpperCAmelCase : Optional[int] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None
    __UpperCAmelCase : Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def _UpperCamelCase ( cls ):
        # Factory returning a state instance populated with the defaults above.
        return cls()


@dataclass
class lowerCAmelCase__ ( __lowerCamelCase ):
    """simple docstring"""

    # Output container: previous sample, its derivative, and scheduler state
    # (field names lost to obfuscation).
    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : jnp.ndarray
    __UpperCAmelCase : KarrasVeSchedulerState


class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase ):
    """simple docstring"""

    @property
    def _UpperCamelCase ( self ):
        # Always True — presumably the `has_state` flag advertising that this
        # scheduler carries explicit functional state. TODO confirm.
        return True

    @register_to_config
    def __init__( self , a_ = 0.02 , a_ = 100 , a_ = 1.0_07 , a_ = 80 , a_ = 0.05 , a_ = 50 , ):
        # `@register_to_config` stores constructor arguments on `self.config`,
        # so an empty body is the intended pattern. The config keys read later
        # are sigma_min/sigma_max/s_churn/s_noise/s_min/s_max — the parameter
        # names here were obfuscated to duplicate `a_`s.
        pass

    def _UpperCamelCase ( self ):
        # Create a fresh, empty scheduler state.
        return KarrasVeSchedulerState.create()

    def _UpperCamelCase ( self , a_ , a_ , a_ = () ):
        # set_timesteps: build a descending timestep index array plus the
        # matching sigma schedule, and return the updated (replaced) state.
        lowerCamelCase_ : List[Any] = jnp.arange(0 , a_ )[::-1].copy()
        # Geometric interpolation from sigma_max down to sigma_min.
        lowerCamelCase_ : List[str] = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=a_ , schedule=jnp.array(a_ , dtype=jnp.floataa ) , timesteps=a_ , )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , ):
        # add_noise_to_input: when sigma lies inside [s_min, s_max], bump it
        # by a churn factor gamma and add matching Gaussian noise.
        if self.config.s_min <= sigma <= self.config.s_max:
            lowerCamelCase_ : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            lowerCamelCase_ : Optional[int] = 0

        # sample eps ~ N(0, S_noise^2 * I)
        lowerCamelCase_ : Union[str, Any] = random.split(a_ , num=1 )
        lowerCamelCase_ : str = self.config.s_noise * random.normal(key=a_ , shape=sample.shape )
        lowerCamelCase_ : List[str] = sigma + gamma * sigma
        lowerCamelCase_ : Tuple = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        # step: sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        # is a single Euler step from sigma_hat to sigma_prev.
        lowerCamelCase_ : List[str] = sample_hat + sigma_hat * model_output
        lowerCamelCase_ : Union[str, Any] = (sample_hat - pred_original_sample) / sigma_hat
        lowerCamelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ = True , ):
        # step_correct: second-order correction averaging the derivative at
        # the current point with the corrected derivative (Heun-style).
        lowerCamelCase_ : Optional[Any] = sample_prev + sigma_prev * model_output
        lowerCamelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev
        lowerCamelCase_ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=a_ , derivative=a_ , state=a_ )

    def _UpperCamelCase ( self , a_ , a_ , a_ , a_ ):
        # add_noise is not supported by this scheduler.
        raise NotImplementedError()
73
0
"""Knuth-Morris-Pratt substring search."""
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """Return True if ``pattern`` occurs as a substring of ``text``.

    Implements Knuth-Morris-Pratt: precompute the failure table for the
    pattern, then scan the text once, for O(len(pattern) + len(text)) total.
    Assumes a non-empty ``pattern`` (``pattern[j]`` is read unconditionally).
    """
    # 1) Construct the failure array for the pattern.
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern.
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # Mismatch mid-pattern: fall back along the failure links instead of
        # rewinding the text pointer.
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Compute the KMP failure table for ``pattern``.

    ``failure[k]`` is the length of the longest proper prefix of
    ``pattern[: k + 1]`` that is also a suffix of it.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Reuse the already-computed border of the shorter prefix.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
508
"""Generate all permutations of a list with Heap's iterative algorithm."""


def heaps(arr: list) -> list:
    """Return every permutation of ``arr`` as a list of tuples.

    Uses the iterative formulation of Heap's algorithm: the counter array
    ``c`` encodes how many swaps have been performed at each position, so
    each successive permutation differs from the previous one by exactly
    one swap. ``arr`` is permuted in place while generating.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list) -> None:
        # c plays the role of the call-stack state of the recursive version.
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    # Even i: swap the current element with the first one.
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    # Odd i: swap with the position tracked by c[i].
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0  # restart the scan, mimicking the recursive descent
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
508
1
"""Lazy-import bootstrap for the TrOCR model package.

Heavy submodules are only imported when one of their attributes is first
accessed, via the ``_LazyModule`` proxy installed into ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


# Maps each submodule name to the public symbols it provides.
_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch-backed modeling classes are only exported when torch is installed.
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    # Replace this module object with a lazy proxy that imports submodules
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
708
# Processor wrapping a ViT image processor and a CLIP tokenizer, with support
# for an optional "visual prompt" image alongside text/images (CLIPSeg-style).
# NOTE(review): this cell appears machine-obfuscated — the base class name
# (`snake_case__`), parameter names (duplicate `SCREAMING_SNAKE_CASE_`, a
# SyntaxError), and most assignment targets (`lowerCAmelCase__`) were lost,
# so bodies read names (`kwargs`, `image_processor`, `text`, `encoding`, …)
# that are never bound as written. Comments describe apparent intent only —
# TODO: confirm against the upstream transformers processor before use.
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class lowerCAmelCase_ ( snake_case__ ):
    # ProcessorMixin-style class attributes: managed sub-component names and
    # the classes used to auto-load them.
    UpperCamelCase_ :Tuple = ['image_processor', 'tokenizer']
    UpperCamelCase_ :Tuple = 'ViTImageProcessor'
    UpperCamelCase_ :Dict = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Tuple=None , **SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, warning the caller, then validate both
        # sub-components before delegating to the mixin constructor.
        lowerCAmelCase__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , SCREAMING_SNAKE_CASE_ , )
            lowerCAmelCase__ = kwargs.pop('''feature_extractor''' )

        lowerCAmelCase__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )

    def __call__( self : Dict , SCREAMING_SNAKE_CASE_ : Any=None , SCREAMING_SNAKE_CASE_ : Tuple=None , SCREAMING_SNAKE_CASE_ : Dict=None , SCREAMING_SNAKE_CASE_ : Optional[int]=None , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
        # Dispatch on which of text / visual_prompt / images were supplied and
        # assemble the matching BatchEncoding. Exactly one of text or
        # visual_prompt may be given; images may accompany either.
        if text is None and visual_prompt is None and images is None:
            raise ValueError('''You have to specify either text, visual prompt or images.''' )

        if text is not None and visual_prompt is not None:
            raise ValueError('''You have to specify exactly one type of prompt. Either text or visual prompt.''' )

        if text is not None:
            lowerCAmelCase__ = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

        if visual_prompt is not None:
            lowerCAmelCase__ = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

        if images is not None:
            lowerCAmelCase__ = self.image_processor(SCREAMING_SNAKE_CASE_ , return_tensors=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

        if visual_prompt is not None and images is not None:
            # Conditioning image plus target images: return both pixel tensors.
            lowerCAmelCase__ = {
                '''pixel_values''': image_features.pixel_values,
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            # NOTE(review): the assignment target here was presumably
            # `encoding["pixel_values"]` before obfuscation — TODO confirm.
            lowerCAmelCase__ = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            lowerCAmelCase__ = {
                '''conditional_pixel_values''': prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**SCREAMING_SNAKE_CASE_ ) , tensor_type=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Union[str, Any] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : str ):
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Optional[int] , *SCREAMING_SNAKE_CASE_ : Union[str, Any] , **SCREAMING_SNAKE_CASE_ : Tuple ):
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )

    @property
    def __snake_case ( self : str ):
        # Deprecated alias for `image_processor_class`.
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , SCREAMING_SNAKE_CASE_ , )
        return self.image_processor_class

    @property
    def __snake_case ( self : Tuple ):
        # Deprecated alias for `image_processor`.
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , SCREAMING_SNAKE_CASE_ , )
        return self.image_processor
288
0