Dataset schema (each row below lists its cells in this column order):
- code (string): lengths 82 to 54.1k
- code_codestyle (int64): 0 to 699
- style_context (string): lengths 111 to 35.6k
- style_context_codestyle (int64): 0 to 699
- label (int64): 0 to 1
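A minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face datasets library; the parquet file name passed to data_files is a placeholder (this page does not give the actual storage path or hub id), so substitute the real location:

from datasets import load_dataset

# Placeholder file name; point data_files at wherever this split is actually stored.
ds = load_dataset("parquet", data_files={"train": "train.parquet"})["train"]

print(ds.features)  # code / style_context are strings, the *_codestyle ids and label are int64
row = ds[0]
print(len(row["code"]), len(row["style_context"]))  # string lengths: 82-54.1k and 111-35.6k
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])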
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''pixel_values'''] def __init__( self : Optional[Any] , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Dict , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Tuple = size if size is not None else {'''shortest_edge''': 384} __SCREAMING_SNAKE_CASE : int = get_size_dict(_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = do_resize __SCREAMING_SNAKE_CASE : Tuple = size # Default value set here for backwards compatibility where the value in config is None __SCREAMING_SNAKE_CASE : Any = crop_pct if crop_pct is not None else 224 / 256 __SCREAMING_SNAKE_CASE : List[Any] = resample __SCREAMING_SNAKE_CASE : Tuple = do_rescale __SCREAMING_SNAKE_CASE : Optional[int] = rescale_factor __SCREAMING_SNAKE_CASE : Union[str, Any] = do_normalize __SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def UpperCAmelCase__ ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : str , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = get_size_dict(_A , default_to_square=_A ) if "shortest_edge" not in size: raise ValueError(F'''Size dictionary must contain \'shortest_edge\' key. 
Got {size.keys()}''' ) __SCREAMING_SNAKE_CASE : Tuple = size['''shortest_edge'''] if shortest_edge < 384: # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct __SCREAMING_SNAKE_CASE : int = int(shortest_edge / crop_pct ) __SCREAMING_SNAKE_CASE : List[Any] = get_resize_output_image_size(_A , size=_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : Dict = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A ) # then crop to (shortest_edge, shortest_edge) return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A ) else: # warping (no cropping) when evaluated at 384 or larger return resize( _A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : str , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ): """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ): """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : Tuple , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : int , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE : Dict = crop_pct if crop_pct is not None else self.crop_pct __SCREAMING_SNAKE_CASE : str = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE : List[Any] = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE : Any = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE : List[str] = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else self.image_mean __SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else self.image_std __SCREAMING_SNAKE_CASE : List[str] = size if size is not None else self.size __SCREAMING_SNAKE_CASE : Tuple = get_size_dict(_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_resize and size["shortest_edge"] < 384 and crop_pct is None: raise ValueError('''crop_pct must be specified if size < 384.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__SCREAMING_SNAKE_CASE : Optional[int] = [to_numpy_array(_A ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE : Tuple = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE : int = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __SCREAMING_SNAKE_CASE : List[str] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __SCREAMING_SNAKE_CASE : Tuple = [to_channel_dimension_format(_A , _A ) for image in images] __SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images} return BatchFeature(data=_A , tensor_type=_A )
code_codestyle: 74
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Copy the first n lines of every file in src_dir into dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
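If the script above is saved as, say, minify.py (name illustrative), fire exposes minify as a command line tool, so a call such as python minify.py source_dir/ dest_dir/ 100 would keep only the first 100 lines of each file in source_dir.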
style_context_codestyle: 74
label: 1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''mobilenet_v2''' def __init__( self : int , _A : Dict=3 , _A : Optional[int]=224 , _A : int=1.0 , _A : List[Any]=8 , _A : Optional[int]=8 , _A : Any=6 , _A : Any=32 , _A : List[Any]=True , _A : Optional[int]=True , _A : int="relu6" , _A : str=True , _A : List[str]=0.8 , _A : str=0.02 , _A : Optional[Any]=0.0_01 , _A : Optional[Any]=255 , **_A : List[Any] , ): """simple docstring""" super().__init__(**_A ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) __SCREAMING_SNAKE_CASE : Tuple = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : List[Any] = depth_multiplier __SCREAMING_SNAKE_CASE : str = depth_divisible_by __SCREAMING_SNAKE_CASE : Union[str, Any] = min_depth __SCREAMING_SNAKE_CASE : int = expand_ratio __SCREAMING_SNAKE_CASE : str = output_stride __SCREAMING_SNAKE_CASE : Optional[int] = first_layer_is_expansion __SCREAMING_SNAKE_CASE : Tuple = finegrained_output __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act __SCREAMING_SNAKE_CASE : Any = tf_padding __SCREAMING_SNAKE_CASE : Any = classifier_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range __SCREAMING_SNAKE_CASE : str = layer_norm_eps __SCREAMING_SNAKE_CASE : Optional[Any] = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def UpperCAmelCase__ ( self : int ): """simple docstring""" if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return 1e-4
code_codestyle: 74
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
style_context_codestyle: 74
label: 1
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_A , '''embed_dim''' ) ) self.parent.assertTrue(hasattr(_A , '''num_heads''' ) ) class __UpperCamelCase : """simple docstring""" def __init__( self : List[str] , _A : List[Any] , _A : Optional[int]=13 , _A : Optional[Any]=64 , _A : Optional[Any]=3 , _A : Dict=[16, 48, 96] , _A : Optional[int]=[1, 3, 6] , _A : str=[1, 2, 10] , _A : Dict=[7, 3, 3] , _A : Tuple=[4, 2, 2] , _A : Optional[int]=[2, 1, 1] , _A : List[Any]=[2, 2, 2] , _A : int=[False, False, True] , _A : int=[0.0, 0.0, 0.0] , _A : Dict=0.02 , _A : int=1e-12 , _A : int=True , _A : List[str]=True , _A : Optional[int]=2 , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : List[str] = patch_sizes __SCREAMING_SNAKE_CASE : str = patch_stride __SCREAMING_SNAKE_CASE : Tuple = patch_padding __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : str = num_labels __SCREAMING_SNAKE_CASE : Tuple = num_channels __SCREAMING_SNAKE_CASE : Tuple = embed_dim __SCREAMING_SNAKE_CASE : Any = num_heads __SCREAMING_SNAKE_CASE : str = stride_kv __SCREAMING_SNAKE_CASE : Tuple = depth __SCREAMING_SNAKE_CASE : List[Any] = cls_token __SCREAMING_SNAKE_CASE : List[str] = attention_drop_rate __SCREAMING_SNAKE_CASE : Optional[int] = initializer_range __SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = None if self.use_labels: # create a random int32 tensor of given shape __SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE : int = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : int ): """simple docstring""" return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Dict , _A : Union[str, Any] , _A : List[str] , _A : Union[str, Any] ): """simple 
docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = TFCvtModel(config=_A ) __SCREAMING_SNAKE_CASE : List[str] = model(_A , training=_A ) __SCREAMING_SNAKE_CASE : List[Any] = (self.image_size, self.image_size) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = image_size[0], image_size[1] for i in range(len(self.depth ) ): __SCREAMING_SNAKE_CASE : List[str] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __SCREAMING_SNAKE_CASE : Tuple = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def UpperCAmelCase__ ( self : str , _A : Union[str, Any] , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = TFCvtForImageClassification(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A , labels=_A , training=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = config_and_inputs __SCREAMING_SNAKE_CASE : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () lowerCAmelCase_ = ( {'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification} if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = TFCvtModelTester(self ) __SCREAMING_SNAKE_CASE : Optional[Any] = TFCvtConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason='''Cvt does not output attentions''' ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" pass @unittest.skip(reason='''Cvt does not use inputs_embeds''' ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" pass @unittest.skip(reason='''Cvt does not support input and output embeddings''' ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) def UpperCAmelCase__ ( self : int ): """simple docstring""" super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def UpperCAmelCase__ ( self : Any ): 
"""simple docstring""" super().test_keras_fit() @unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = tf.keras.mixed_precision.Policy('''mixed_float16''' ) tf.keras.mixed_precision.set_global_policy(_A ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy('''float32''' ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_A ) __SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE : Any = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" def check_hidden_states_output(_A : int , _A : int , _A : int ): __SCREAMING_SNAKE_CASE : Optional[int] = model_class(_A ) __SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(_A , _A ) ) __SCREAMING_SNAKE_CASE : str = outputs.hidden_states __SCREAMING_SNAKE_CASE : str = len(self.model_tester.depth ) self.assertEqual(len(_A ) , _A ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : Dict = True check_hidden_states_output(_A , _A , _A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE : int = True check_hidden_states_output(_A , _A , _A ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A ) @slow def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Optional[int] = TFCvtModel.from_pretrained(_A ) self.assertIsNotNone(_A ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __SCREAMING_SNAKE_CASE : Optional[Any] = self.default_image_processor __SCREAMING_SNAKE_CASE : Union[str, Any] = prepare_img() __SCREAMING_SNAKE_CASE : int = image_processor(images=_A , return_tensors='''tf''' ) # forward pass 
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(**_A ) # verify the logits __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _A ) __SCREAMING_SNAKE_CASE : Any = tf.constant([0.92_85, 0.90_15, -0.31_50] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _A , atol=1e-4 ) )
code_codestyle: 74
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
style_context_codestyle: 74
label: 1
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer lowercase_ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowercase_ = { """vocab_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt""" ), """google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""", """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """google/electra-small-generator""": ( """https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json""" ), """google/electra-base-generator""": ( """https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json""" ), """google/electra-large-generator""": ( """https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json""" ), """google/electra-small-discriminator""": ( """https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json""" ), """google/electra-base-discriminator""": ( """https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json""" ), """google/electra-large-discriminator""": ( """https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json""" ), }, } lowercase_ = { """google/electra-small-generator""": 512, """google/electra-base-generator""": 512, """google/electra-large-generator""": 512, """google/electra-small-discriminator""": 512, """google/electra-base-discriminator""": 512, """google/electra-large-discriminator""": 512, } lowercase_ = { """google/electra-small-generator""": {"""do_lower_case""": True}, """google/electra-base-generator""": {"""do_lower_case""": True}, """google/electra-large-generator""": {"""do_lower_case""": True}, """google/electra-small-discriminator""": {"""do_lower_case""": True}, """google/electra-base-discriminator""": {"""do_lower_case""": True}, """google/electra-large-discriminator""": {"""do_lower_case""": True}, } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_INIT_CONFIGURATION lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = ElectraTokenizer def __init__( self : Union[str, Any] , _A : Dict=None , _A : Tuple=None , _A : List[str]=True , _A : Tuple="[UNK]" , _A : Optional[Any]="[SEP]" , _A : Union[str, Any]="[PAD]" , _A : str="[CLS]" , _A : List[Any]="[MASK]" , _A : str=True , _A : List[str]=None , **_A : Any , ): """simple docstring""" super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , tokenize_chinese_chars=_A , strip_accents=_A , **_A , ) __SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( 
normalizer_state.get('''lowercase''' , _A ) != do_lower_case or normalizer_state.get('''strip_accents''' , _A ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , _A ) != tokenize_chinese_chars ): __SCREAMING_SNAKE_CASE : str = getattr(_A , normalizer_state.pop('''type''' ) ) __SCREAMING_SNAKE_CASE : Tuple = do_lower_case __SCREAMING_SNAKE_CASE : List[str] = strip_accents __SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars __SCREAMING_SNAKE_CASE : Optional[Any] = normalizer_class(**_A ) __SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case def UpperCAmelCase__ ( self : int , _A : Tuple , _A : Dict=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def UpperCAmelCase__ ( self : Dict , _A : List[int] , _A : Optional[List[int]] = None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = [self.sep_token_id] __SCREAMING_SNAKE_CASE : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def UpperCAmelCase__ ( self : Any , _A : str , _A : Optional[str] = None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self._tokenizer.model.save(_A , name=_A ) return tuple(_A )
code_codestyle: 74
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''data2vec-vision''' def __init__( self : Optional[int] , _A : List[Any]=768 , _A : Any=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.0 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Any=1e-12 , _A : Optional[Any]=224 , _A : Union[str, Any]=16 , _A : Tuple=3 , _A : List[Any]=False , _A : List[str]=False , _A : Dict=False , _A : Dict=False , _A : Any=0.1 , _A : List[str]=0.1 , _A : Dict=True , _A : Dict=[3, 5, 7, 11] , _A : Union[str, Any]=[1, 2, 3, 6] , _A : Optional[Any]=True , _A : Any=0.4 , _A : List[str]=256 , _A : Any=1 , _A : Any=False , _A : Union[str, Any]=255 , **_A : Tuple , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : List[str] = use_mask_token __SCREAMING_SNAKE_CASE : List[Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE : Dict = use_relative_position_bias __SCREAMING_SNAKE_CASE : str = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_scale_init_value __SCREAMING_SNAKE_CASE : str = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : str = out_indices __SCREAMING_SNAKE_CASE : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : Tuple = use_auxiliary_head __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels __SCREAMING_SNAKE_CASE : List[Any] = auxiliary_num_convs __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_concat_input __SCREAMING_SNAKE_CASE : Any = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4
style_context_codestyle: 74
label: 1
import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask lowercase_ = logging.getLogger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : str , _A : Optional[Any]=-1 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = label_idx def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any] , _A : Union[Split, str] ): """simple docstring""" if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : List[str] = mode.value __SCREAMING_SNAKE_CASE : str = os.path.join(_A , F'''{mode}.txt''' ) __SCREAMING_SNAKE_CASE : List[Any] = 1 __SCREAMING_SNAKE_CASE : List[str] = [] with open(_A , encoding='''utf-8''' ) as f: __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Dict = [] for line in f: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=_A , labels=_A ) ) guid_index += 1 __SCREAMING_SNAKE_CASE : Dict = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] else: __SCREAMING_SNAKE_CASE : int = line.split(''' ''' ) words.append(splits[0] ) if len(_A ) > 1: labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) ) else: # Examples could have no label for mode = "test" labels.append('''O''' ) if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=_A , labels=_A ) ) return examples def UpperCAmelCase__ ( self : List[str] , _A : TextIO , _A : TextIO , _A : List ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = 0 for line in test_input_reader: if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n": writer.write(_A ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: __SCREAMING_SNAKE_CASE : Optional[Any] = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n''' writer.write(_A ) else: logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] ) def UpperCAmelCase__ ( self : Optional[Any] , _A : str ): """simple docstring""" if path: with open(_A , '''r''' ) as f: __SCREAMING_SNAKE_CASE : Tuple = f.read().splitlines() if "O" not in labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''O'''] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple ): """simple docstring""" super().__init__(label_idx=-2 ) def UpperCAmelCase__ ( self : Any , _A : str ): """simple docstring""" if path: with open(_A , '''r''' ) as f: __SCREAMING_SNAKE_CASE : Tuple = f.read().splitlines() if "O" not in labels: __SCREAMING_SNAKE_CASE : str = ['''O'''] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : int , _A : List[str] , _A : Union[Split, str] ): """simple docstring""" if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = mode.value __SCREAMING_SNAKE_CASE : List[str] = os.path.join(_A , F'''{mode}.txt''' ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = [] with open(_A , encoding='''utf-8''' ) as f: for sentence in parse_incr(_A ): __SCREAMING_SNAKE_CASE : Dict = [] 
__SCREAMING_SNAKE_CASE : List[Any] = [] for token in sentence: words.append(token['''form'''] ) labels.append(token['''upos'''] ) assert len(_A ) == len(_A ) if words: examples.append(InputExample(guid=F'''{mode}-{guid_index}''' , words=_A , labels=_A ) ) guid_index += 1 return examples def UpperCAmelCase__ ( self : Optional[int] , _A : TextIO , _A : TextIO , _A : List ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = 0 for sentence in parse_incr(_A ): __SCREAMING_SNAKE_CASE : int = preds_list[example_id] __SCREAMING_SNAKE_CASE : Any = '''''' for token in sentence: out += F'''{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) ''' out += "\n" writer.write(_A ) example_id += 1 def UpperCAmelCase__ ( self : Optional[Any] , _A : str ): """simple docstring""" if path: with open(_A , '''r''' ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
code_codestyle: 74
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask __SCREAMING_SNAKE_CASE : str = use_token_type_ids __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : int = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , 
hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.num_choices __SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), 
(__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase_ = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A ) __SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace( _A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A ) loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Any = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
74
1
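The DistilBert tester in the row above only asserts output shapes. A minimal standalone version of that check, assuming `torch` and `transformers` are installed (config values mirror the tester's defaults):

import torch
from transformers import DistilBertConfig, DistilBertModel

config = DistilBertConfig(vocab_size=99, dim=32, n_layers=5, n_heads=4, hidden_dim=37)
model = DistilBertModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (13, 7))  # (batch_size, seq_length)
with torch.no_grad():
    out = model(input_ids)
assert out.last_hidden_state.shape == (13, 7, config.dim)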
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position lowercase_ = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip lowercase_ = concatenate_datasets lowercase_ = DownloadConfig lowercase_ = DownloadManager lowercase_ = DownloadMode lowercase_ = DownloadConfig lowercase_ = DownloadMode lowercase_ = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
74
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
74
1
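A minimal usage sketch for the lock API defined above; the import path is an assumption (in the `datasets` source tree these classes live at `datasets.utils.filelock`):

from datasets.utils.filelock import FileLock, Timeout

lock = FileLock("my_resource.lock", timeout=1)
try:
    with lock:  # acquire() on enter, release() on exit, re-entrant via the lock counter
        ...     # at most one process runs this block at a time
except Timeout:
    print("could not acquire my_resource.lock within 1 second")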
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401

deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
74
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} 
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
74
1
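A standalone sketch of the xpath construction performed above, using the real `bs4` package (the dump's `bsa` spelling is an artifact of the identifier transform):

from bs4 import BeautifulSoup

soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
child = node = soup.find_all("p")[1]
tags, subs = [], []
for parent in node.parents:
    siblings = parent.find_all(child.name, recursive=False)
    tags.append(child.name)
    subs.append(0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child))
    child = parent
xpath = "".join(f"/{t}[{s}]" if s else f"/{t}" for t, s in zip(reversed(tags), reversed(subs)))
print(xpath)  # /html/body/p[2]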
import argparse import os import re import packaging.version lowercase_ = """examples/""" lowercase_ = { """examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""), """doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } lowercase_ = { """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } lowercase_ = """README.md""" def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[Any] = f.read() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = REPLACE_PATTERNS[pattern] __SCREAMING_SNAKE_CASE : List[Any] = replace.replace('''VERSION''' , snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = re_pattern.sub(snake_case , snake_case ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(snake_case ) def a__ ( snake_case ): """simple docstring""" for folder, directories, fnames in os.walk(snake_case ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(snake_case , snake_case ) , snake_case , pattern='''examples''' ) def a__ ( snake_case , snake_case=False ): """simple docstring""" for pattern, fname in REPLACE_FILES.items(): update_version_in_file(snake_case , snake_case , snake_case ) if not patch: update_version_in_examples(snake_case ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = '''🤗 Transformers currently provides the following architectures''' __SCREAMING_SNAKE_CASE : Union[str, Any] = '''1. Want to contribute a new model?''' with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Find the start of the list. __SCREAMING_SNAKE_CASE : Any = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 __SCREAMING_SNAKE_CASE : Tuple = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): __SCREAMING_SNAKE_CASE : int = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) def a__ ( ): """simple docstring""" with open(REPLACE_FILES['''init'''] , '''r''' ) as f: __SCREAMING_SNAKE_CASE : Union[str, Any] = f.read() __SCREAMING_SNAKE_CASE : Union[str, Any] = REPLACE_PATTERNS['''init'''][0].search(snake_case ).groups()[0] return packaging.version.parse(snake_case ) def a__ ( snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: __SCREAMING_SNAKE_CASE : List[Any] = default_version.base_version elif patch: __SCREAMING_SNAKE_CASE : Optional[int] = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}''' else: __SCREAMING_SNAKE_CASE : List[Any] = F'''{default_version.major}.{default_version.minor + 1}.0''' # Now let's ask nicely if that's the right one. __SCREAMING_SNAKE_CASE : Any = input(F'''Which version are you releasing? [{default_version}]''' ) if len(snake_case ) == 0: __SCREAMING_SNAKE_CASE : Optional[Any] = default_version print(F'''Updating version to {version}.''' ) global_version_update(snake_case , patch=snake_case ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = get_version() __SCREAMING_SNAKE_CASE : Optional[Any] = F'''{current_version.major}.{current_version.minor + 1}.0.dev0''' __SCREAMING_SNAKE_CASE : Optional[Any] = current_version.base_version # Check with the user we got that right. __SCREAMING_SNAKE_CASE : Dict = input(F'''Which version are we developing now? [{dev_version}]''' ) if len(snake_case ) == 0: __SCREAMING_SNAKE_CASE : Optional[Any] = dev_version print(F'''Updating version to {version}.''' ) global_version_update(snake_case ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") lowercase_ = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
74
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
74
1
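The LeViT conversion above copies weights purely by position: it zips the timm and HF key lists and assumes both state dicts enumerate parameters in the same order. A toy sketch of that pattern (the Linear modules are hypothetical stand-ins):

import torch
from collections import OrderedDict

src = torch.nn.Linear(4, 2)  # stands in for the timm model
dst = torch.nn.Linear(4, 2)  # stands in for the HF model
src_sd = src.state_dict()
key_map = OrderedDict(zip(src_sd.keys(), dst.state_dict().keys()))
dst.load_state_dict(OrderedDict((key_map[k], v) for k, v in src_sd.items()))
x = torch.randn(1, 4)
assert torch.allclose(src(x), dst(x))  # identical weights -> identical outputs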
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_torch, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MgpstrProcessor, ViTImageProcessor @require_torch @require_vision class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ViTImageProcessor if is_vision_available() else None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = (3, 32, 128) __SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp() # fmt: off __SCREAMING_SNAKE_CASE : List[Any] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on __SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(_A , range(len(_A ) ) ) ) __SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = { '''do_normalize''': False, '''do_resize''': True, '''image_processor_type''': '''ViTImageProcessor''', '''resample''': 3, '''size''': {'''height''': 32, '''width''': 128}, } __SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCAmelCase__ ( self : Optional[Any] , **_A : Optional[Any] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : int , **_A : List[str] ): """simple docstring""" return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta ) __SCREAMING_SNAKE_CASE : Dict = Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) return image_input def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : Tuple = MgpstrProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Any = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_A ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = 
self.get_tokenizer() __SCREAMING_SNAKE_CASE : int = self.get_image_processor() __SCREAMING_SNAKE_CASE : str = MgpstrProcessor(tokenizer=_A , image_processor=_A ) processor.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __SCREAMING_SNAKE_CASE : str = self.get_image_processor(do_normalize=_A , padding_value=1.0 ) __SCREAMING_SNAKE_CASE : Optional[int] = MgpstrProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 ) self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.char_tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor() __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : str = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : Tuple = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : Any = image_processor(_A , return_tensors='''np''' ) __SCREAMING_SNAKE_CASE : int = processor(images=_A , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : int = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Any = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : str = '''test''' __SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : str = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[int] = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''test''' __SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : str = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''labels'''] ) # test if it raises when no input is passed with pytest.raises(_A ): processor() def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_image_processor() __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]] __SCREAMING_SNAKE_CASE : Dict = processor.char_decode(_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(_A ) __SCREAMING_SNAKE_CASE : Tuple = [seq.replace(''' ''' , '''''' ) for seq in decoded_tok] self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.get_image_processor() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[int] = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : List[Any] 
= None __SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : List[str] = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_image_processor() __SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Tuple = MgpstrProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(1 , 27 , 38 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.randn(1 , 27 , 5_0257 ) __SCREAMING_SNAKE_CASE : str = torch.randn(1 , 27 , 3_0522 ) __SCREAMING_SNAKE_CASE : str = processor.batch_decode([char_input, bpe_input, wp_input] ) self.assertListEqual(list(results.keys() ) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''] )
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase_ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
1
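The row above wires Falcon through `_LazyModule`, so the modeling code (and its torch dependency) is only imported when an attribute is first resolved. A consumer-side sketch, assuming a `transformers` build that ships Falcon:

import transformers

config = transformers.FalconConfig(num_hidden_layers=2)  # attribute access triggers the real import
model = transformers.FalconForCausalLM(config)
print(model.config.model_type)  # falcon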
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
74
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
74
1
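The warning extractor above treats any non-indented line as the start of a new warning and buffers indented continuation lines under it. A self-contained sketch of that parse, with hypothetical sample lines:

targets = ["DeprecationWarning", "FutureWarning"]
selected, buffer = set(), []
sample = [
    "src/a.py:10: DeprecationWarning: foo is deprecated",
    "    foo()",
    "src/b.py:20: UserWarning: bar",
    "    bar()",
]
for line in sample + [""]:  # trailing sentinel flushes the last buffered warning
    if not line.startswith(" "):
        if buffer:
            warning = "\n".join(buffer)
            if any(f": {t}: " in warning for t in targets):
                selected.add(warning)
            buffer.clear()
    if line:
        buffer.append(line.strip())
print(sorted(selected))  # keeps only the DeprecationWarning entry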
# Class names below are reconstructed from the M-CLIP integration so that the
# `MCLIPConfig` reference (undefined in the flattened dump) resolves.
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Masked mean-pooling over the sequence dimension.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
74
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
74
1
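A shape-level sketch of driving the temporal transformer defined above; keyword names are assumed to match diffusers' `TransformerTemporalModel`:

import torch
from diffusers.models.transformer_temporal import TransformerTemporalModel

model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=32, num_layers=1)
batch, frames = 2, 4
sample = torch.randn(batch * frames, 32, 8, 8)  # (batch * frames, channels, height, width)
out = model(sample, num_frames=frames).sample
assert out.shape == sample.shape  # the residual connection preserves the input shape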
import torch


def a__():
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'''Successfully ran on {num_gpus} GPUs''')


if __name__ == "__main__":
    a__()  # the flat dump called an undefined `main()`; `a__` is the function defined here
74
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
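# Illustrative sketch (added for clarity, not part of the original row): the checker above keys
# off "# Copied from diffusers.<dotted.path> [with Old->New[, ...] [all-casing]]" comments.
# The example line below is hypothetical; the regex is the same _re_copy_warning pattern defined
# in the file.
import re

_re_copy_warning_demo = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_example = "    # Copied from diffusers.models.attention.BasicTransformerBlock.forward with BasicTransformerBlock->MyBlock"
_indent, _object_name, _replace_pattern = _re_copy_warning_demo.search(_example).groups()
# _indent == "    "
# _object_name == "models.attention.BasicTransformerBlock.forward"
# _replace_pattern == "with BasicTransformerBlock->MyBlock"; appending " all-casing" would also
# swap the lower- and upper-cased variants of the two names before the comparison.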
74
1
def a__ ( snake_case ):
    """Convert lowercase ASCII letters in a string to uppercase."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in snake_case)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
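# Worked example (added for clarity, not part of the original row): lowercase ASCII letters sit
# exactly 32 code points above their uppercase counterparts, e.g. ord("a") == 97 and
# chr(97 - 32) == "A", so punctuation, digits and already-uppercase letters pass through unchanged.
assert a__("hello, World 1") == "HELLO, WORLD 1"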
74
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
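# Note added for clarity (not part of the original tests): prepare_inputs/replicate/shard plus
# jax.random.split(...) is the standard data-parallel setup for a pmapped Flax pipeline -- the
# pipeline params are replicated on every device, the tokenized prompts and PRNG keys are sharded
# so each device denoises its own sample, and the leading output axis therefore equals
# jax.device_count(), which is why the shape assertions start with (jax.device_count(), 1, 768, 768, 3).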
74
1
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = AutoencoderKL lowerCAmelCase_ = '''sample''' lowerCAmelCase_ = 1E-2 @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = 4 __SCREAMING_SNAKE_CASE : List[str] = 3 __SCREAMING_SNAKE_CASE : int = (32, 32) __SCREAMING_SNAKE_CASE : Any = floats_tensor((batch_size, num_channels) + sizes ).to(_A ) return {"sample": image} @property def UpperCAmelCase__ ( self : Any ): """simple docstring""" return (3, 32, 32) @property def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return (3, 32, 32) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { '''block_out_channels''': [32, 64], '''in_channels''': 3, '''out_channels''': 3, '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], '''latent_channels''': 4, } __SCREAMING_SNAKE_CASE : str = self.dummy_input return init_dict, inputs_dict def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" pass def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" pass @unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = self.prepare_init_args_and_inputs_for_common() __SCREAMING_SNAKE_CASE : Any = self.model_class(**_A ) model.to(_A ) assert not model.is_gradient_checkpointing and model.training __SCREAMING_SNAKE_CASE : Optional[int] = model(**_A ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __SCREAMING_SNAKE_CASE : Tuple = torch.randn_like(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __SCREAMING_SNAKE_CASE : Optional[int] = self.model_class(**_A ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_A ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __SCREAMING_SNAKE_CASE : str = model_a(**_A ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __SCREAMING_SNAKE_CASE : int = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) __SCREAMING_SNAKE_CASE : Optional[int] = dict(model.named_parameters() ) __SCREAMING_SNAKE_CASE : int = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=_A ) self.assertIsNotNone(_A ) self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 ) model.to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' ) __SCREAMING_SNAKE_CASE : Any = model.to(_A ) model.eval() if torch_device == "mps": __SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(0 ) else: __SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device=_A ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : str = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __SCREAMING_SNAKE_CASE : List[Any] = image.to(_A ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : int = model(_A , sample_posterior=_A , generator=_A ).sample __SCREAMING_SNAKE_CASE : Optional[Any] = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor( [ -4.00_78e-01, -3.83_23e-04, -1.26_81e-01, -1.14_62e-01, 2.00_95e-01, 1.08_93e-01, -8.82_47e-02, -3.03_61e-01, -9.86_44e-03, ] ) elif torch_device == "cpu": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] ) else: __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor( [-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] ) self.assertTrue(torch_all_close(_A , _A , rtol=1e-2 ) ) @slow class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Any ): """simple docstring""" return F'''gaussian_noise_s={seed}_shape={"_".join([str(_A ) for s in shape] )}.npy''' def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : int , _A : str=0 , _A : Any=(4, 3, 512, 512) , _A : int=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE : Any = torch.from_numpy(load_hf_numpy(self.get_file_format(_A , _A ) ) ).to(_A ).to(_A ) return image def UpperCAmelCase__ ( self : Tuple , _A : Any="CompVis/stable-diffusion-v1-4" , _A : Dict=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = '''fp16''' if fpaa else None __SCREAMING_SNAKE_CASE : str = torch.floataa if fpaa else torch.floataa __SCREAMING_SNAKE_CASE : Tuple = AutoencoderKL.from_pretrained( _A , subfolder='''vae''' , torch_dtype=_A , revision=_A , ) model.to(_A ).eval() return model def UpperCAmelCase__ ( self : Dict , _A : Optional[int]=0 ): """simple docstring""" if torch_device == "mps": return torch.manual_seed(_A ) return torch.Generator(device=_A ).manual_seed(_A ) @parameterized.expand( [ # fmt: off [33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def UpperCAmelCase__ ( self : Optional[Any] , _A : str , _A : Optional[int] , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE : str = self.get_sd_image(_A ) __SCREAMING_SNAKE_CASE : List[Any] = self.get_generator(_A ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , generator=_A , sample_posterior=_A ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE : Tuple = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_A , _A , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]], [47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase__ ( self : Tuple , _A : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_sd_vae_model(fpaa=_A ) __SCREAMING_SNAKE_CASE : Any = self.get_sd_image(_A , fpaa=_A ) __SCREAMING_SNAKE_CASE : int = self.get_generator(_A ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Dict = model(_A , generator=_A , 
sample_posterior=_A ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(_A ) assert torch_all_close(_A , _A , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]], [47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]], # fmt: on ] ) def UpperCAmelCase__ ( self : int , _A : Dict , _A : Any , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE : List[str] = self.get_sd_image(_A ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Any = model(_A ).sample assert sample.shape == image.shape __SCREAMING_SNAKE_CASE : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice ) assert torch_all_close(_A , _A , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]], [37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase__ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE : str = self.get_sd_image(_A , shape=(3, 4, 64, 64) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Tuple = model.decode(_A ).sample assert list(sample.shape ) == [3, 3, 512, 512] __SCREAMING_SNAKE_CASE : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().cpu() __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(_A ) assert torch_all_close(_A , _A , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]], [16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]], # fmt: on ] ) @require_torch_gpu def UpperCAmelCase__ ( self : List[str] , _A : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_vae_model(fpaa=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_sd_image(_A , shape=(3, 4, 64, 64) , fpaa=_A ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Dict = model.decode(_A ).sample assert list(sample.shape ) == [3, 3, 512, 512] __SCREAMING_SNAKE_CASE : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu() __SCREAMING_SNAKE_CASE : Tuple = torch.tensor(_A ) assert torch_all_close(_A , _A , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_sd_vae_model(fpaa=_A ) __SCREAMING_SNAKE_CASE : Any = self.get_sd_image(_A , shape=(3, 4, 64, 64) , fpaa=_A ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : List[Any] = model.decode(_A ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model.decode(_A ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_A 
, _A , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' ) def UpperCAmelCase__ ( self : Dict , _A : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_image(_A , shape=(3, 4, 64, 64) ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model.decode(_A ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __SCREAMING_SNAKE_CASE : Any = model.decode(_A ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(_A , _A , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]], [47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]], # fmt: on ] ) def UpperCAmelCase__ ( self : List[str] , _A : Dict , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.get_sd_vae_model() __SCREAMING_SNAKE_CASE : int = self.get_sd_image(_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_generator(_A ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[int] = model.encode(_A ).latent_dist __SCREAMING_SNAKE_CASE : Optional[int] = dist.sample(generator=_A ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __SCREAMING_SNAKE_CASE : Union[str, Any] = sample[0, -1, -3:, -3:].flatten().cpu() __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(_A ) __SCREAMING_SNAKE_CASE : List[Any] = 3e-3 if torch_device != '''mps''' else 1e-2 assert torch_all_close(_A , _A , atol=_A )
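# Minimal usage sketch (added for clarity; it assumes the same CompVis/stable-diffusion-v1-4 VAE
# that get_sd_vae_model() loads above): encode maps (B, 3, H, W) images to latents of shape
# (B, 4, H // 8, W // 8) -- which is what the encode test asserts -- and decode maps them back.
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
image = torch.randn(1, 3, 512, 512)
with torch.no_grad():
    latents = vae.encode(image).latent_dist.sample()  # shape (1, 4, 64, 64)
    reconstruction = vae.decode(latents).sample       # shape (1, 3, 512, 512)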
74
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
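# Note added for clarity (not part of the original row): this follows the standard lazy-import
# layout of transformers __init__ files -- _import_structure only lists the names available per
# submodule, the TYPE_CHECKING branch gives static type checkers real imports, and the
# _LazyModule constructed at the bottom defers the actual submodule imports until an attribute is
# first accessed, so optional dependencies (torch, tokenizers, vision) are only loaded when needed.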
74
1
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowercase_ = abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def a__ ( snake_case ): """simple docstring""" config.addinivalue_line( '''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' ) config.addinivalue_line( '''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' ) config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' ) config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' ) config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' ) config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' ) def a__ ( snake_case ): """simple docstring""" from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case ) def a__ ( snake_case ): """simple docstring""" from transformers.testing_utils import pytest_terminal_summary_main __SCREAMING_SNAKE_CASE : List[Any] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(snake_case , id=snake_case ) def a__ ( snake_case , snake_case ): """simple docstring""" # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 # Doctest custom flag to ignore output. lowercase_ = doctest.register_optionflag("""IGNORE_RESULT""") lowercase_ = doctest.OutputChecker class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : int , _A : Any , _A : Tuple , _A : Optional[int] ): """simple docstring""" if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , _A , _A , _A ) lowercase_ = CustomOutputChecker lowercase_ = HfDoctestModule lowercase_ = HfDocTestParser
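# Note added for clarity (not part of the original conftest): registering the IGNORE_RESULT
# optionflag lets individual doctests opt out of output comparison, e.g.
#
#     >>> some_noisy_call()  # doctest: +IGNORE_RESULT
#
# because CustomOutputChecker.check_output returns True whenever that flag is set, while all other
# doctests still go through the default OutputChecker comparison.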
74
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) 
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , 
add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
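# Note added for clarity (not part of the original tests): the offsets test above uses the rust
# tokenizer because return_offsets_mapping=True is only implemented for the "fast" tokenizers, and
# the expected (start, end) spans differ depending on whether the checkpoint lower-cases and
# strips accents ("naïve" -> "naive") or keeps the original casing.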
74
1
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MgpstrTokenizer lowerCAmelCase_ = False lowerCAmelCase_ = {} lowerCAmelCase_ = False def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" super().setUp() # fmt: off __SCREAMING_SNAKE_CASE : Optional[int] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on __SCREAMING_SNAKE_CASE : Optional[int] = dict(zip(_A , range(len(_A ) ) ) ) __SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(_A ) + '''\n''' ) def UpperCAmelCase__ ( self : Tuple , **_A : List[str] ): """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : int , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''tester''' __SCREAMING_SNAKE_CASE : str = '''tester''' return input_text, output_text @unittest.skip('''MGP-STR always lower cases letters.''' ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" pass def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.get_tokenizers(do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = '''[SPECIAL_TOKEN]''' tokenizer.add_special_tokens({'''cls_token''': special_token} ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode([special_token] , add_special_tokens=_A ) self.assertEqual(len(_A ) , 1 ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(_A , skip_special_tokens=_A ) self.assertTrue(special_token not in decoded ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = self.get_input_output_texts(_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.convert_tokens_to_ids(_A ) __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.convert_ids_to_tokens(_A ) self.assertNotEqual(len(_A ) , 0 ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(_A ) self.assertIsInstance(_A , _A ) self.assertEqual(text_a.replace(''' ''' , '''''' ) , _A ) @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" pass @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" pass
74
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


lowercase_ = logging.get_logger(__name__)


class __UpperCamelCase(MobileViTImageProcessor):
    """Deprecated feature extractor; use MobileViTImageProcessor instead."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
74
1
lowercase_ = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ lowercase_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowercase_ = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
74
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase_ = 1_00_00 lowerCAmelCase_ = None lowerCAmelCase_ = None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase_ = ParquetConfig def UpperCAmelCase__ ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_A , (str, list, tuple) ): __SCREAMING_SNAKE_CASE : Tuple = data_files if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __SCREAMING_SNAKE_CASE : int = [] for split_name, files in data_files.items(): if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_A ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) ) break splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) ) return splits def UpperCAmelCase__ ( self : str , _A : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A ) except ValueError as 
e: logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' ) raise
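# Usage sketch (added for clarity; the file paths are hypothetical): this builder is what the
# generic "parquet" loader dispatches to, so the config fields referenced via self.config above
# (batch_size, columns, features) can be passed straight through load_dataset, e.g.
#
#     from datasets import load_dataset
#     ds = load_dataset("parquet", data_files={"train": "data/train-*.parquet"}, columns=["id", "text"])
#
# Column selection is pushed down to pq.ParquetFile.iter_batches, so unused columns are never read.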
74
1
import math def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : List[str] = 2 __SCREAMING_SNAKE_CASE : Dict = int(math.sqrt(snake_case ) ) # Size of every segment __SCREAMING_SNAKE_CASE : Tuple = [True] * (end + 1) __SCREAMING_SNAKE_CASE : Dict = [] while start <= end: if temp[start] is True: in_prime.append(snake_case ) for i in range(start * start , end + 1 , snake_case ): __SCREAMING_SNAKE_CASE : Optional[int] = False start += 1 prime += in_prime __SCREAMING_SNAKE_CASE : str = end + 1 __SCREAMING_SNAKE_CASE : Optional[Any] = min(2 * end , snake_case ) while low <= n: __SCREAMING_SNAKE_CASE : Dict = [True] * (high - low + 1) for each in in_prime: __SCREAMING_SNAKE_CASE : int = math.floor(low / each ) * each if t < low: t += each for j in range(snake_case , high + 1 , snake_case ): __SCREAMING_SNAKE_CASE : Optional[int] = False for j in range(len(snake_case ) ): if temp[j] is True: prime.append(j + low ) __SCREAMING_SNAKE_CASE : List[str] = high + 1 __SCREAMING_SNAKE_CASE : List[str] = min(high + end , snake_case ) return prime print(sieve(10**6))
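# Note added for clarity (not part of the original row): this is a segmented sieve -- primes up to
# sqrt(n) are found first, then each window of roughly sqrt(n) numbers is sieved with them, so the
# working boolean arrays stay O(sqrt(n)) instead of O(n). Small sanity check:
# sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]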
74
from math import isclose, sqrt def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4 __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 __SCREAMING_SNAKE_CASE : str = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __SCREAMING_SNAKE_CASE : int = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a__ ( snake_case = 1.4 , snake_case = -9.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : float = first_x_coord __SCREAMING_SNAKE_CASE : float = first_y_coord __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
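# Note added for clarity (not part of the original row): the ellipse is 4x^2 + y^2 = 100, so
# implicit differentiation gives a tangent slope of -4x/y and a normal slope of y/(4x) (the
# normal_gradient above). Each bounce reflects the incoming gradient about that normal via the
# tangent double-angle identities, solves the quadratic for the second intersection with the
# ellipse, and the loop counts reflections until the beam exits through the gap
# -0.01 <= x <= 0.01 at the top of the ellipse (Project Euler problem 144).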
74
1
import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", } lowercase_ = { """vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""}, """merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""}, } lowercase_ = { """ctrl""": 256, } lowercase_ = { """Pregnancy""": 168_629, """Christianity""": 7_675, """Explain""": 106_423, """Fitness""": 63_440, """Saving""": 63_163, """Ask""": 27_171, """Ass""": 95_985, """Joke""": 163_509, """Questions""": 45_622, """Thoughts""": 49_605, """Retail""": 52_342, """Feminism""": 164_338, """Writing""": 11_992, """Atheism""": 192_263, """Netflix""": 48_616, """Computing""": 39_639, """Opinion""": 43_213, """Alone""": 44_967, """Funny""": 58_917, """Gaming""": 40_358, """Human""": 4_088, """India""": 1_331, """Joker""": 77_138, """Diet""": 36_206, """Legal""": 11_859, """Norman""": 4_939, """Tip""": 72_689, """Weight""": 52_343, """Movies""": 46_273, """Running""": 23_425, """Science""": 2_090, """Horror""": 37_793, """Confession""": 60_572, """Finance""": 12_250, """Politics""": 16_360, """Scary""": 191_985, """Support""": 12_654, """Technologies""": 32_516, """Teenage""": 66_160, """Event""": 32_769, """Learned""": 67_460, """Notion""": 182_770, """Wikipedia""": 37_583, """Books""": 6_665, """Extract""": 76_050, """Confessions""": 102_701, """Conspiracy""": 75_932, """Links""": 63_674, """Narcissus""": 150_425, """Relationship""": 54_766, """Relationships""": 134_796, """Reviews""": 41_671, """News""": 4_256, """Translation""": 26_820, """multilingual""": 128_406, } def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = set() __SCREAMING_SNAKE_CASE : Any = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __SCREAMING_SNAKE_CASE : Dict = char __SCREAMING_SNAKE_CASE : int = set(snake_case ) return pairs class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ = CONTROL_CODES def __init__( self : Dict , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any]="<unk>" , **_A : Any ): """simple docstring""" super().__init__(unk_token=_A , **_A ) with open(_A , encoding='''utf-8''' ) as vocab_handle: __SCREAMING_SNAKE_CASE : Optional[Any] = json.load(_A ) __SCREAMING_SNAKE_CASE : str = {v: k for k, v in self.encoder.items()} with open(_A , encoding='''utf-8''' ) as merges_handle: __SCREAMING_SNAKE_CASE : Tuple = merges_handle.read().split('''\n''' )[1:-1] __SCREAMING_SNAKE_CASE : List[Any] = [tuple(merge.split() ) for merge in merges] __SCREAMING_SNAKE_CASE : str = dict(zip(_A , range(len(_A ) ) ) ) __SCREAMING_SNAKE_CASE : int = {} @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return len(self.encoder ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase__ ( self : Tuple , _A : List[str] ): """simple docstring""" if token in self.cache: return self.cache[token] __SCREAMING_SNAKE_CASE : List[str] = tuple(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) 
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_pairs(_A ) if not pairs: return token while True: __SCREAMING_SNAKE_CASE : Optional[int] = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = bigram __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : str = 0 while i < len(_A ): try: __SCREAMING_SNAKE_CASE : Optional[int] = word.index(_A , _A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __SCREAMING_SNAKE_CASE : Tuple = j if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = new_word if len(_A ) == 1: break else: __SCREAMING_SNAKE_CASE : Dict = get_pairs(_A ) __SCREAMING_SNAKE_CASE : List[str] = '''@@ '''.join(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = word[:-4] __SCREAMING_SNAKE_CASE : Optional[int] = word return word def UpperCAmelCase__ ( self : Tuple , _A : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = [] __SCREAMING_SNAKE_CASE : List[Any] = re.findall(r'''\S+\n?''' , _A ) for token in words: split_tokens.extend(list(self.bpe(_A ).split(''' ''' ) ) ) return split_tokens def UpperCAmelCase__ ( self : int , _A : Optional[Any] ): """simple docstring""" return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Optional[int] , _A : Union[str, Any] ): """simple docstring""" return self.decoder.get(_A , self.unk_token ) def UpperCAmelCase__ ( self : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = ''' '''.join(_A ).replace('''@@ ''' , '''''' ).strip() return out_string def UpperCAmelCase__ ( self : Tuple , _A : str , _A : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_A ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __SCREAMING_SNAKE_CASE : Tuple = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __SCREAMING_SNAKE_CASE : Optional[int] = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(_A , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + '''\n''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = 0 with open(_A , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _A : kv[1] ): if index != token_index: logger.warning( F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ''' Please check that the tokenizer is not corrupted!''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = token_index writer.write(''' '''.join(_A ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
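# Worked example (added for clarity, not part of the original row): get_pairs("lower") returns
# {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}. Inside bpe() the token is first split into
# characters with "</w>" appended to the last one, the adjacent pair with the smallest rank in
# bpe_ranks is merged repeatedly until no known pair remains, and the resulting sub-words are
# joined with "@@ " (the trailing "</w>" is stripped), so "@@" marks pieces that continue the
# previous token.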
74
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] 
, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
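# --- Usage sketch (not part of the test suite above): exercising the processor the same way
# the tests do. Assumes `transformers` ships MobileNetV1ImageProcessor (the class behind the
# obfuscated name MobileNetVaImageProcessor here); sizes mirror the tester defaults above.
import numpy as np
from transformers import MobileNetV1ImageProcessor

processor = MobileNetV1ImageProcessor(size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18})
image = np.random.randint(0, 256, size=(32, 40, 3), dtype=np.uint8)  # H x W x C uint8 image
pixel_values = processor(image, return_tensors="np").pixel_values
print(pixel_values.shape)  # (1, 3, 18, 18): batch, channels, crop height, crop width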
import unittest
from dataclasses import dataclass

import pytest

from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict


@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class PassedArgumentsTest(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
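# --- Illustration (hedged): what _convert_nargs_to_dict is expected to return for the
# success arguments above, consistent with the isinstance assertions in the test.
from accelerate.utils.launch import _convert_nargs_to_dict

converted = _convert_nargs_to_dict(
    ["--model_name_or_path", "bert", "--do_train", "False", "--epochs", "3", "--learning_rate", "5e-5", "--max_steps", "50.5"]
)
print(converted)
# expected: {'model_name_or_path': 'bert', 'do_train': False, 'epochs': 3, 'learning_rate': 5e-05, 'max_steps': 50.5}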
def z_function(input_str: str) -> list[int]:
    """Compute the Z-array: z[i] is the length of the longest prefix of input_str
    that also starts at position i (z[0] is left as 0 by convention here)."""
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check whether the match starting at i can be extended by one more character."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of pattern in input_str using the Z-function."""
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
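# --- Worked example for the Z-function above (values traced by hand):
print(z_function("abacaba"))           # [0, 0, 1, 0, 3, 0, 1]
print(find_pattern("aba", "abacaba"))  # 2 -> "aba" occurs at indices 0 and 4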
import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = SwinConfig(image_size=192 ) if "base" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 6 __SCREAMING_SNAKE_CASE : Optional[int] = 128 __SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE : Dict = (4, 8, 16, 32) elif "large" in model_name: __SCREAMING_SNAKE_CASE : str = 12 __SCREAMING_SNAKE_CASE : Union[str, Any] = 192 __SCREAMING_SNAKE_CASE : Optional[Any] = (2, 2, 18, 2) __SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48) else: raise ValueError('''Model not supported, only supports base and large variants''' ) __SCREAMING_SNAKE_CASE : List[str] = window_size __SCREAMING_SNAKE_CASE : Union[str, Any] = embed_dim __SCREAMING_SNAKE_CASE : Dict = depths __SCREAMING_SNAKE_CASE : Union[str, Any] = num_heads return config def a__ ( snake_case ): """simple docstring""" if "encoder.mask_token" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' ) if "encoder.patch_embed.proj" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "encoder.patch_embed.norm" in name: __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' ) if "attn.proj" in name: __SCREAMING_SNAKE_CASE : Tuple = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE : Tuple = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' ) if name == "encoder.norm.weight": __SCREAMING_SNAKE_CASE : List[Any] = '''layernorm.weight''' if name == "encoder.norm.bias": __SCREAMING_SNAKE_CASE : List[str] = '''layernorm.bias''' if "decoder" in name: pass else: __SCREAMING_SNAKE_CASE : Optional[int] = '''swin.''' + name return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : List[Any] = orig_state_dict.pop(snake_case ) if "attn_mask" in key: pass elif "qkv" in key: __SCREAMING_SNAKE_CASE : Any = key.split('''.''' ) __SCREAMING_SNAKE_CASE : int = int(key_split[2] ) __SCREAMING_SNAKE_CASE : Tuple = int(key_split[4] ) __SCREAMING_SNAKE_CASE : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __SCREAMING_SNAKE_CASE : List[str] = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Union[str, Any] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Optional[int] = val[ :dim ] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Any = val[ -dim: ] else: __SCREAMING_SNAKE_CASE : Tuple = val return orig_state_dict def a__ ( snake_case , snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = torch.load(snake_case , map_location='''cpu''' )['''model'''] __SCREAMING_SNAKE_CASE : int = 
get_swin_config(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = SwinForMaskedImageModeling(snake_case ) model.eval() __SCREAMING_SNAKE_CASE : int = convert_state_dict(snake_case , snake_case ) model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __SCREAMING_SNAKE_CASE : Optional[int] = ViTImageProcessor(size={'''height''': 192, '''width''': 192} ) __SCREAMING_SNAKE_CASE : List[Any] = Image.open(requests.get(snake_case , stream=snake_case ).raw ) __SCREAMING_SNAKE_CASE : Tuple = image_processor(images=snake_case , return_tensors='''pt''' ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(**snake_case ).logits print(outputs.keys() ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(snake_case ) if push_to_hub: print(F'''Pushing model and image processor for {model_name} to hub''' ) model.push_to_hub(F'''microsoft/{model_name}''' ) image_processor.push_to_hub(F'''microsoft/{model_name}''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""swin-base-simmim-window6-192""", type=str, choices=["""swin-base-simmim-window6-192""", """swin-large-simmim-window12-192"""], help="""Name of the Swin SimMIM model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default="""/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth""", type=str, help="""Path to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
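# --- Hedged illustration of the key renaming performed above (the helper is invoked as
# rename_key at its call sites). The mapping below was traced by hand through its branches:
expected_renames = {
    "encoder.patch_embed.proj.weight": "swin.embeddings.patch_embeddings.projection.weight",
    "encoder.layers.0.blocks.0.mlp.fc1.weight": "swin.encoder.layers.0.blocks.0.intermediate.dense.weight",
    "encoder.norm.weight": "swin.layernorm.weight",
}
for old_key, new_key in expected_renames.items():
    print(f"{old_key} -> {new_key}")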
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
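# --- Hedged sketch of the lazy-import behavior this module sets up: the package module is
# swapped for a _LazyModule, so heavy submodules are imported only on first attribute access.
import transformers.models.swin as swin

config = swin.SwinConfig()      # triggers the import of configuration_swin only
model = swin.SwinModel(config)  # triggers the import of modeling_swin (and torch) on demand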
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = TextToVideoSDPipeline lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. lowerCAmelCase_ = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''return_dict''', '''callback''', '''callback_steps''', ] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , ) __SCREAMING_SNAKE_CASE : Any = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , ) __SCREAMING_SNAKE_CASE : List[Any] = CLIPTextModel(_A ) __SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE : Optional[int] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def UpperCAmelCase__ ( self : Dict , _A : List[Any] , _A : Tuple=0 ): """simple docstring""" if str(_A ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(_A ) else: __SCREAMING_SNAKE_CASE : Any = torch.Generator(device=_A ).manual_seed(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() __SCREAMING_SNAKE_CASE : List[Any] = TextToVideoSDPipeline(**_A ) __SCREAMING_SNAKE_CASE : int = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE : List[Any] 
= self.get_dummy_inputs(_A ) __SCREAMING_SNAKE_CASE : Any = '''np''' __SCREAMING_SNAKE_CASE : Dict = sd_pipe(**_A ).frames __SCREAMING_SNAKE_CASE : Tuple = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) __SCREAMING_SNAKE_CASE : List[Any] = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCAmelCase__ ( self : int ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A , expected_max_diff=1e-2 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" pass def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return super().test_progress_bar() @slow @skip_mps class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' ) __SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) __SCREAMING_SNAKE_CASE : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) __SCREAMING_SNAKE_CASE : Any = pipe.to('''cuda''' ) __SCREAMING_SNAKE_CASE : Optional[int] = '''Spiderman is surfing''' __SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : int = pipe(_A , generator=_A , num_inference_steps=25 , output_type='''pt''' ).frames __SCREAMING_SNAKE_CASE : Tuple = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' ) __SCREAMING_SNAKE_CASE : List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) __SCREAMING_SNAKE_CASE : List[str] = pipe.to('''cuda''' ) __SCREAMING_SNAKE_CASE : List[str] = '''Spiderman is surfing''' __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = pipe(_A , generator=_A , num_inference_steps=2 , output_type='''pt''' ).frames __SCREAMING_SNAKE_CASE : Union[str, Any] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
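# --- Minimal inference sketch (hedged), mirroring the slow test above; requires a CUDA
# device and downloads the damo-vilab checkpoint on first use.
import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe("Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt").frames
print(video_frames.shape)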
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
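# --- Worked illustration of the in_proj splitting used in convert_state_dict above: a fused
# (3*dim, dim) qkv weight is sliced into equal thirds for query, key and value.
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q, k, v = qkv_weight[:dim, :], qkv_weight[dim : dim * 2, :], qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)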
import re import tempfile from pathlib import Path import pytest import yaml from datasets.utils.readme import ReadMe # @pytest.fixture # def example_yaml_structure(): lowercase_ = yaml.safe_load( """\ name: \"\" allow_empty: false allow_empty_text: true subsections: - name: \"Dataset Card for X\" # First-level markdown heading allow_empty: false allow_empty_text: true subsections: - name: \"Table of Contents\" allow_empty: false allow_empty_text: false subsections: null - name: \"Dataset Description\" allow_empty: false allow_empty_text: false subsections: - name: \"Dataset Summary\" allow_empty: false allow_empty_text: false subsections: null - name: \"Supported Tasks and Leaderboards\" allow_empty: true allow_empty_text: true subsections: null - name: Languages allow_empty: false allow_empty_text: true subsections: null """ ) lowercase_ = { """name""": """root""", """text""": """""", """is_empty_text""": True, """subsections""": [ { """name""": """Dataset Card for My Dataset""", """text""": """""", """is_empty_text""": True, """subsections""": [ {"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []}, { """name""": """Dataset Description""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Dataset Summary""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [], }, { """name""": """Supported Tasks and Leaderboards""", """text""": """""", """is_empty_text""": True, """subsections""": [], }, {"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []}, ], }, ], } ], } lowercase_ = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ lowercase_ = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. #### Extra Ignored Subsection ### Supported Tasks and Leaderboards ### Languages Language Text """ lowercase_ = { """name""": """root""", """text""": """""", """is_empty_text""": True, """subsections""": [ { """name""": """Dataset Card for My Dataset""", """text""": """""", """is_empty_text""": True, """subsections""": [ {"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []}, { """name""": """Dataset Description""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Dataset Summary""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": [ { """name""": """Extra Ignored Subsection""", """text""": """""", """is_empty_text""": True, """subsections""": [], } ], }, { """name""": """Supported Tasks and Leaderboards""", """text""": """""", """is_empty_text""": True, """subsections""": [], }, {"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []}, ], }, ], } ], } lowercase_ = """\ --- --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. 
### Supported Tasks and Leaderboards ### Languages Language Text """ lowercase_ = ( """The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.""" ) lowercase_ = """\ # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ lowercase_ = ( """The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.""" ) lowercase_ = """\ --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ lowercase_ = """The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.""" lowercase_ = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary ### Supported Tasks and Leaderboards ### Languages Language Text """ lowercase_ = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).""" lowercase_ = """\ --- language: - zh - en --- # Dataset Card for My Dataset """ lowercase_ = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'.""" lowercase_ = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Languages Language Text """ lowercase_ = """The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.""" lowercase_ = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages """ lowercase_ = """The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.""" lowercase_ = """\ --- language: - zh - en --- ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ lowercase_ = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.""" lowercase_ = """\ --- language: - zh - en --- # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text # Dataset Card My Dataset """ lowercase_ = """The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. 
Skipping further validation for this README.""" lowercase_ = """\ --- language: - zh - en --- # Dataset Card My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ lowercase_ = """The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.""" lowercase_ = """""" lowercase_ = """The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.""" lowercase_ = """\ --- language: - zh - en --- # Dataset Card for My Dataset # Dataset Card for My Dataset ## Table of Contents Some text here. ## Dataset Description Some text here. ### Dataset Summary Some text here. ### Supported Tasks and Leaderboards ### Languages Language Text """ lowercase_ = """The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.""" @pytest.mark.parametrize( '''readme_md, expected_dict''' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def a__ ( snake_case , snake_case ): """simple docstring""" assert ReadMe.from_string(snake_case , snake_case ).to_dict() == expected_dict @pytest.mark.parametrize( '''readme_md, expected_error''' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def a__ ( snake_case , snake_case ): """simple docstring""" with pytest.raises(snake_case , match=re.escape(expected_error.format(path='''root''' ) ) ): __SCREAMING_SNAKE_CASE : int = ReadMe.from_string(snake_case , snake_case ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def a__ ( snake_case , snake_case ): """simple docstring""" with pytest.raises(snake_case , match=re.escape(expected_error.format(path='''root''' ) ) ): ReadMe.from_string(snake_case , snake_case ) @pytest.mark.parametrize( '''readme_md,''' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def a__ ( snake_case ): """simple docstring""" ReadMe.from_string(snake_case , snake_case , suppress_parsing_errors=snake_case ) @pytest.mark.parametrize( '''readme_md, expected_dict''' , [ (README_CORRECT, CORRECT_DICT), (README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL), ] , ) def a__ ( snake_case , snake_case ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE : Any = Path(snake_case ) / '''README.md''' with open(snake_case , '''w+''' ) as readme_file: readme_file.write(snake_case ) 
__SCREAMING_SNAKE_CASE : Tuple = ReadMe.from_readme(snake_case , snake_case ).to_dict() assert out["name"] == path assert out["text"] == "" assert out["is_empty_text"] assert out["subsections"] == expected_dict["subsections"] @pytest.mark.parametrize( '''readme_md, expected_error''' , [ (README_NO_YAML, EXPECTED_ERROR_README_NO_YAML), (README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML), (README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML), (README_EMPTY, EXPECTED_ERROR_README_EMPTY), (README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION), (README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL), (README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION), (README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT), (README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL), (README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL), (README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT), ] , ) def a__ ( snake_case , snake_case ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE : Optional[int] = Path(snake_case ) / '''README.md''' with open(snake_case , '''w+''' ) as readme_file: readme_file.write(snake_case ) __SCREAMING_SNAKE_CASE : str = expected_error.format(path=snake_case ) with pytest.raises(snake_case , match=re.escape(snake_case ) ): __SCREAMING_SNAKE_CASE : Dict = ReadMe.from_readme(snake_case , snake_case ) readme.validate() @pytest.mark.parametrize( '''readme_md, expected_error''' , [ (README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1), ] , ) def a__ ( snake_case , snake_case ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE : int = Path(snake_case ) / '''README.md''' with open(snake_case , '''w+''' ) as readme_file: readme_file.write(snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = expected_error.format(path=snake_case ) with pytest.raises(snake_case , match=re.escape(snake_case ) ): ReadMe.from_readme(snake_case , snake_case ) @pytest.mark.parametrize( '''readme_md,''' , [ (README_MULTIPLE_SAME_HEADING_1), ] , ) def a__ ( snake_case ): """simple docstring""" with tempfile.TemporaryDirectory() as tmp_dir: __SCREAMING_SNAKE_CASE : List[str] = Path(snake_case ) / '''README.md''' with open(snake_case , '''w+''' ) as readme_file: readme_file.write(snake_case ) ReadMe.from_readme(snake_case , snake_case , suppress_parsing_errors=snake_case )
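# --- Usage sketch (hedged): validating a README string directly, as the parametrized tests
# above do. README_CORRECT and example_yaml_structure follow the original test module's
# names (the commented fixture at the top); in this dump they are bound to obfuscated names.
from datasets.utils.readme import ReadMe

readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
readme.validate()  # raises ValueError describing any issues found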
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to a same-named file in dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
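# --- Example invocation (hedged): truncate every file under ./raw to its first 100 lines,
# writing the results under ./small.
#   shell:  python minify.py raw small 100
#   python: minify("raw", "small", 100)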
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def a__ ( snake_case , snake_case=0.999 , snake_case="cosine" , ): """simple docstring""" if alpha_transform_type == "cosine": def alpha_bar_fn(snake_case ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(snake_case ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __SCREAMING_SNAKE_CASE : Dict = [] for i in range(snake_case ): __SCREAMING_SNAKE_CASE : str = i / num_diffusion_timesteps __SCREAMING_SNAKE_CASE : Tuple = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(snake_case ) / alpha_bar_fn(snake_case ) , snake_case ) ) return torch.tensor(snake_case , dtype=torch.floataa ) class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = [e.name for e in KarrasDiffusionSchedulers] lowerCAmelCase_ = 2 @register_to_config def __init__( self : List[str] , _A : int = 1000 , _A : float = 0.0_00_85 , _A : float = 0.0_12 , _A : str = "linear" , _A : Optional[Union[np.ndarray, List[float]]] = None , _A : str = "epsilon" , _A : str = "linspace" , _A : int = 0 , ): """simple docstring""" if trained_betas is not None: __SCREAMING_SNAKE_CASE : int = torch.tensor(_A , dtype=torch.floataa ) elif beta_schedule == "linear": __SCREAMING_SNAKE_CASE : Dict = torch.linspace(_A , _A , _A , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __SCREAMING_SNAKE_CASE : int = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _A , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __SCREAMING_SNAKE_CASE : Tuple = betas_for_alpha_bar(_A ) else: raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' ) __SCREAMING_SNAKE_CASE : str = 1.0 - self.betas __SCREAMING_SNAKE_CASE : List[Any] = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_A , _A , _A ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : Dict , _A : Dict=None ): """simple docstring""" if schedule_timesteps is None: __SCREAMING_SNAKE_CASE : int = self.timesteps __SCREAMING_SNAKE_CASE : Union[str, Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = 1 if len(_A ) > 1 else 0 else: __SCREAMING_SNAKE_CASE : int = timestep.cpu().item() if torch.is_tensor(_A ) else timestep __SCREAMING_SNAKE_CASE : List[str] = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCAmelCase__ ( self : Dict , _A : torch.FloatTensor , _A : Union[float, torch.FloatTensor] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.index_for_timestep(_A ) if self.state_in_first_order: __SCREAMING_SNAKE_CASE : str = self.sigmas[step_index] else: __SCREAMING_SNAKE_CASE : List[str] = self.sigmas_interpol[step_index] __SCREAMING_SNAKE_CASE : Any = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCAmelCase__ ( self : List[Any] , _A : int , _A : Union[str, torch.device] = None , _A : Optional[int] = None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = num_inference_steps __SCREAMING_SNAKE_CASE : int = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __SCREAMING_SNAKE_CASE : Optional[int] = np.linspace(0 , num_train_timesteps - 1 , _A , dtype=_A )[::-1].copy() elif self.config.timestep_spacing == "leading": __SCREAMING_SNAKE_CASE : Dict = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __SCREAMING_SNAKE_CASE : Union[str, Any] = (np.arange(0 , _A ) * step_ratio).round()[::-1].copy().astype(_A ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __SCREAMING_SNAKE_CASE : List[str] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __SCREAMING_SNAKE_CASE : Dict = (np.arange(_A , 0 , -step_ratio )).round().copy().astype(_A ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __SCREAMING_SNAKE_CASE : Any = torch.from_numpy(np.log(_A ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = np.interp(_A , np.arange(0 , len(_A ) ) , _A ) __SCREAMING_SNAKE_CASE : List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(_A ).to(device=_A ) # interpolate sigmas __SCREAMING_SNAKE_CASE : Tuple = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() __SCREAMING_SNAKE_CASE : Optional[int] = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) __SCREAMING_SNAKE_CASE : Tuple = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(_A ).startswith('''mps''' ): # mps does not support float64 __SCREAMING_SNAKE_CASE : int = torch.from_numpy(_A ).to(_A , dtype=torch.floataa ) else: __SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(_A ).to(_A ) # interpolate timesteps __SCREAMING_SNAKE_CASE : Dict = self.sigma_to_t(_A ).to(_A , dtype=timesteps.dtype ) __SCREAMING_SNAKE_CASE : int = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() __SCREAMING_SNAKE_CASE : int = torch.cat([timesteps[:1], interleaved_timesteps] ) __SCREAMING_SNAKE_CASE : str = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __SCREAMING_SNAKE_CASE : Optional[int] = defaultdict(_A ) def UpperCAmelCase__ ( self : int , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = sigma.log() # get distribution __SCREAMING_SNAKE_CASE : Union[str, Any] = log_sigma - self.log_sigmas[:, None] # get sigmas range __SCREAMING_SNAKE_CASE : Optional[int] = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) __SCREAMING_SNAKE_CASE : int = low_idx + 1 __SCREAMING_SNAKE_CASE : Dict = self.log_sigmas[low_idx] __SCREAMING_SNAKE_CASE : List[Any] = self.log_sigmas[high_idx] # interpolate sigmas __SCREAMING_SNAKE_CASE : Optional[int] = (low - log_sigma) / (low - high) __SCREAMING_SNAKE_CASE : str = w.clamp(0 , 1 ) # transform interpolation to time range __SCREAMING_SNAKE_CASE : Dict = (1 - w) * low_idx + w * high_idx __SCREAMING_SNAKE_CASE : List[str] = t.view(sigma.shape ) return t @property def UpperCAmelCase__ ( self : str ): """simple docstring""" return self.sample is None def UpperCAmelCase__ ( self : Optional[int] , _A : Union[torch.FloatTensor, np.ndarray] , _A : Union[float, torch.FloatTensor] , _A : Union[torch.FloatTensor, np.ndarray] , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.index_for_timestep(_A ) # advance index counter by 1 __SCREAMING_SNAKE_CASE : List[Any] = timestep.cpu().item() if torch.is_tensor(_A ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __SCREAMING_SNAKE_CASE : Tuple = self.sigmas[step_index] __SCREAMING_SNAKE_CASE : Dict = self.sigmas_interpol[step_index + 1] __SCREAMING_SNAKE_CASE : Optional[Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method __SCREAMING_SNAKE_CASE : Any = self.sigmas[step_index - 1] __SCREAMING_SNAKE_CASE : str = self.sigmas_interpol[step_index] __SCREAMING_SNAKE_CASE : Dict = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. 
# We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __SCREAMING_SNAKE_CASE : Tuple = 0 __SCREAMING_SNAKE_CASE : Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __SCREAMING_SNAKE_CASE : Union[str, Any] = sigma_hat if self.state_in_first_order else sigma_interpol __SCREAMING_SNAKE_CASE : Any = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __SCREAMING_SNAKE_CASE : List[str] = sigma_hat if self.state_in_first_order else sigma_interpol __SCREAMING_SNAKE_CASE : int = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __SCREAMING_SNAKE_CASE : Optional[int] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __SCREAMING_SNAKE_CASE : str = sigma_interpol - sigma_hat # store for 2nd order step __SCREAMING_SNAKE_CASE : Any = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order __SCREAMING_SNAKE_CASE : str = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep __SCREAMING_SNAKE_CASE : Dict = sigma_next - sigma_hat __SCREAMING_SNAKE_CASE : List[str] = self.sample __SCREAMING_SNAKE_CASE : str = None __SCREAMING_SNAKE_CASE : List[str] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_A ) def UpperCAmelCase__ ( self : Tuple , _A : torch.FloatTensor , _A : torch.FloatTensor , _A : torch.FloatTensor , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_A ): # mps does not support float64 __SCREAMING_SNAKE_CASE : List[str] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : int = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __SCREAMING_SNAKE_CASE : Optional[Any] = self.timesteps.to(original_samples.device ) __SCREAMING_SNAKE_CASE : int = timesteps.to(original_samples.device ) __SCREAMING_SNAKE_CASE : Dict = [self.index_for_timestep(_A , _A ) for t in timesteps] __SCREAMING_SNAKE_CASE : Optional[int] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __SCREAMING_SNAKE_CASE : Optional[Any] = sigma.unsqueeze(-1 ) __SCREAMING_SNAKE_CASE : Optional[int] = original_samples + noise * sigma return noisy_samples def __len__( self : int ): """simple docstring""" return self.config.num_train_timesteps
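# A minimal usage sketch for the second-order discrete scheduler above. The method
# names in the listing are anonymized; this assumes they map onto the standard
# diffusers scheduler API (set_timesteps / scale_model_input / step), and a zero
# tensor stands in for a real denoising model so the loop runs without weights.
import torch
from diffusers import KDPM2DiscreteScheduler  # assumed concrete scheduler exposing this interface

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10, device="cpu")

sample = torch.randn(1, 3, 32, 32)                          # start from pure noise
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)    # divides by (sigma**2 + 1) ** 0.5, as in the code above
    noise_pred = torch.zeros_like(model_input)               # placeholder for the model's epsilon prediction
    sample = scheduler.step(noise_pred, t, sample).prev_sample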
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
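# A hedged sketch of how a DisjunctiveConstraint like the ones tested above is used in
# practice: constrained beam search through `model.generate(constraints=...)`. The
# checkpoint name and the phrases are illustrative placeholders, not taken from the tests.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# the generated text must contain one of these phrasings, whichever beam search prefers
phrases = ["is very nice", "is really nice"]
constraint = DisjunctiveConstraint(
    [tokenizer(p, add_special_tokens=False).input_ids for p in phrases]
)

inputs = tokenizer("The weather today", return_tensors="pt")
out = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=20)
print(tokenizer.decode(out[0], skip_special_tokens=True))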
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ShapEPipeline lowerCAmelCase_ = ['''prompt'''] lowerCAmelCase_ = ['''prompt'''] lowerCAmelCase_ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] lowerCAmelCase_ = False @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 32 @property def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" return 32 @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.time_input_dim * 4 @property def UpperCAmelCase__ ( self : int ): """simple docstring""" return 8 @property def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def UpperCAmelCase__ ( self : Any ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(_A ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = { '''num_attention_heads''': 2, '''attention_head_dim''': 16, '''embedding_dim''': self.time_input_dim, '''num_embeddings''': 32, '''embedding_proj_dim''': self.text_embedder_hidden_size, '''time_embed_dim''': self.time_embed_dim, '''num_layers''': 1, '''clip_embed_dim''': self.time_input_dim * 2, '''additional_embeddings''': 0, '''time_embed_act_fn''': '''gelu''', '''norm_in_type''': '''layer''', '''encoder_hid_proj_type''': None, '''added_emb_type''': None, } __SCREAMING_SNAKE_CASE : List[Any] = PriorTransformer(**_A ) return model @property def UpperCAmelCase__ ( self : str ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[Any] = { '''param_shapes''': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), '''d_latent''': self.time_input_dim, '''d_hidden''': self.renderer_dim, '''n_output''': 12, '''background''': ( 0.1, 0.1, 0.1, ), } __SCREAMING_SNAKE_CASE : Union[str, Any] = ShapERenderer(**_A ) return model def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.dummy_prior __SCREAMING_SNAKE_CASE : int = self.dummy_text_encoder __SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_tokenizer __SCREAMING_SNAKE_CASE : List[Any] = self.dummy_renderer __SCREAMING_SNAKE_CASE : Tuple = HeunDiscreteScheduler( beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , ) __SCREAMING_SNAKE_CASE : Dict = { '''prior''': 
prior, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''renderer''': renderer, '''scheduler''': scheduler, } return components def UpperCAmelCase__ ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ): """simple docstring""" if str(_A ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(_A ) else: __SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=_A ).manual_seed(_A ) __SCREAMING_SNAKE_CASE : int = { '''prompt''': '''horse''', '''generator''': generator, '''num_inference_steps''': 1, '''frame_size''': 32, '''output_type''': '''np''', } return inputs def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = '''cpu''' __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.pipeline_class(**_A ) __SCREAMING_SNAKE_CASE : str = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE : Dict = pipe(**self.get_dummy_inputs(_A ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0] __SCREAMING_SNAKE_CASE : Any = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __SCREAMING_SNAKE_CASE : str = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : str ): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = torch_device == '''cpu''' __SCREAMING_SNAKE_CASE : List[str] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_A , relax_max_difference=_A , ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Tuple = self.pipeline_class(**_A ) __SCREAMING_SNAKE_CASE : Dict = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE : Tuple = 1 __SCREAMING_SNAKE_CASE : Any = 2 __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_inputs(_A ) for key in inputs.keys(): if key in self.batch_params: __SCREAMING_SNAKE_CASE : str = batch_size * [inputs[key]] __SCREAMING_SNAKE_CASE : str = pipe(**_A , num_images_per_prompt=_A )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : int ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/test_shap_e_np_out.npy''' ) __SCREAMING_SNAKE_CASE : str = ShapEPipeline.from_pretrained('''openai/shap-e''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE : Any = torch.Generator(device=_A ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe( '''a shark''' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_A , _A )
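# A hedged sketch of driving ShapEPipeline outside the test harness, mirroring the slow
# test's arguments. A CUDA device is assumed, and export_to_gif is assumed to be the
# diffusers utility that turns the rendered PIL frames into an animation.
import torch
from diffusers import ShapEPipeline
from diffusers.utils import export_to_gif

pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)

frames = pipe(
    "a shark",
    generator=generator,
    guidance_scale=15.0,
    num_inference_steps=64,
    frame_size=64,
    output_type="pil",   # PIL frames, which export_to_gif expects
).images[0]
export_to_gif(frames, "shark.gif")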
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
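# A small demo of the SimMIM-style mask generator defined above, to make the masking
# arithmetic concrete. The class name is anonymized in the listing but is referred to
# as MaskGenerator later in the script, so that name is assumed here; the anonymized
# dtype placeholder inside __call__ is assumed to be int, as in the original SimMIM recipe.
# With a 192px input, 32px mask patches and 4px model patches, the mask is drawn on a
# 6x6 grid and then upsampled 8x to the 48x48 token grid the model sees.
mask_gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = mask_gen()                        # 1D tensor of 0/1 flags over the token grid
print(mask.shape)                        # torch.Size([2304]) == (192 // 4) ** 2
print(mask.sum().item() / mask.numel())  # ~0.61: 22 of 36 patches, since the count is rounded up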
from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowercase_ = TypeVar("""T""") class __UpperCamelCase ( Generic[T] ): """simple docstring""" def __init__( self : Tuple , _A : T ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = data __SCREAMING_SNAKE_CASE : Node[T] | None = None def __str__( self : Any ): """simple docstring""" return F'''{self.data}''' class __UpperCamelCase ( Generic[T] ): """simple docstring""" def __init__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Node[T] | None = None def __iter__( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.top while node: yield node.data __SCREAMING_SNAKE_CASE : Optional[int] = node.next def __str__( self : Union[str, Any] ): """simple docstring""" return "->".join([str(_A ) for item in self] ) def __len__( self : List[Any] ): """simple docstring""" return len(tuple(iter(self ) ) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" return self.top is None def UpperCAmelCase__ ( self : Any , _A : T ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = Node(_A ) if not self.is_empty(): __SCREAMING_SNAKE_CASE : Optional[int] = self.top __SCREAMING_SNAKE_CASE : Union[str, Any] = node def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" if self.is_empty(): raise IndexError('''pop from empty stack''' ) assert isinstance(self.top , _A ) __SCREAMING_SNAKE_CASE : Dict = self.top __SCREAMING_SNAKE_CASE : Dict = self.top.next return pop_node.data def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" if self.is_empty(): raise IndexError('''peek from empty stack''' ) assert self.top is not None return self.top.data def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = None if __name__ == "__main__": from doctest import testmod testmod()
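# A hedged usage sketch for the linked-list stack above. Its public method names were
# anonymized in the listing; the conventional names push / pop / peek / is_empty are
# assumed here, and "Stack" is assumed as the name of the second class (the dunder
# methods __iter__, __len__ and __str__ are visible above as written).
stack = Stack[int]()
assert stack.is_empty()

for value in (1, 2, 3):
    stack.push(value)

print(stack)         # 3->2->1, top of the stack first, via __str__/__iter__
print(len(stack))    # 3
print(stack.peek())  # 3 (top stays in place)
print(stack.pop())   # 3 (top is removed)
print(stack.pop())   # 2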
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''data2vec-vision''' def __init__( self : Optional[int] , _A : List[Any]=768 , _A : Any=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.0 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Any=1e-12 , _A : Optional[Any]=224 , _A : Union[str, Any]=16 , _A : Tuple=3 , _A : List[Any]=False , _A : List[str]=False , _A : Dict=False , _A : Dict=False , _A : Any=0.1 , _A : List[str]=0.1 , _A : Dict=True , _A : Dict=[3, 5, 7, 11] , _A : Union[str, Any]=[1, 2, 3, 6] , _A : Optional[Any]=True , _A : Any=0.4 , _A : List[str]=256 , _A : Any=1 , _A : Any=False , _A : Union[str, Any]=255 , **_A : Tuple , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : List[str] = use_mask_token __SCREAMING_SNAKE_CASE : List[Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE : Dict = use_relative_position_bias __SCREAMING_SNAKE_CASE : str = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_scale_init_value __SCREAMING_SNAKE_CASE : str = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : str = out_indices __SCREAMING_SNAKE_CASE : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : Tuple = use_auxiliary_head __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels __SCREAMING_SNAKE_CASE : List[Any] = auxiliary_num_convs __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_concat_input __SCREAMING_SNAKE_CASE : Any = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4
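# A hedged sketch of instantiating the vision config above. The class name is anonymized
# in the listing, but its model_type string identifies it as the data2vec-vision
# configuration, so the transformers name Data2VecVisionConfig is assumed; every keyword
# below corresponds to a parameter visible in __init__ above.
from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig(
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    image_size=224,
    patch_size=16,
    use_relative_position_bias=True,  # overriding one boolean default just to show the pattern
)
print(config.image_size, config.num_hidden_layers)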
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def a__ ( *snake_case ): """simple docstring""" with open(snake_case , '''r''' ) as fh: fcntl.flock(snake_case , fcntl.LOCK_EX ) try: print(*snake_case ) finally: fcntl.flock(snake_case , fcntl.LOCK_UN ) lowercase_ = int(os.environ["""LOCAL_RANK"""]) torch.cuda.set_device(local_rank) lowercase_ = torch.device("""cuda""", local_rank) lowercase_ = socket.gethostname() lowercase_ = f'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group("""nccl""") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank lowercase_ = dist.get_rank() lowercase_ = dist.get_world_size() printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(f'''{gpu} is broken''') raise
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask __SCREAMING_SNAKE_CASE : str = use_token_type_ids __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : int = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , 
hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.num_choices __SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), 
(__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase_ = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A ) __SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace( _A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A ) loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Any = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
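# A hedged, end-to-end variant of the integration test above: same checkpoint, but with
# a tokenizer producing the inputs instead of hand-written tensors.
import torch
from transformers import AutoTokenizer, DistilBertModel

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = DistilBertModel.from_pretrained("distilbert-base-uncased")
model.eval()

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, sequence_length, 768])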
import copy import os from typing import TYPE_CHECKING, List, Union if TYPE_CHECKING: pass from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""", } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''align_text_model''' def __init__( self : Optional[int] , _A : Union[str, Any]=3_0522 , _A : List[str]=768 , _A : List[Any]=12 , _A : int=12 , _A : str=3072 , _A : Optional[Any]="gelu" , _A : Union[str, Any]=0.1 , _A : List[str]=0.1 , _A : List[str]=512 , _A : Union[str, Any]=2 , _A : Optional[int]=0.02 , _A : Optional[int]=1e-12 , _A : List[str]=0 , _A : Optional[Any]="absolute" , _A : Tuple=True , **_A : str , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size __SCREAMING_SNAKE_CASE : Dict = hidden_size __SCREAMING_SNAKE_CASE : str = num_hidden_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = hidden_act __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : int = max_position_embeddings __SCREAMING_SNAKE_CASE : Union[str, Any] = type_vocab_size __SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range __SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps __SCREAMING_SNAKE_CASE : Optional[Any] = position_embedding_type __SCREAMING_SNAKE_CASE : Tuple = use_cache __SCREAMING_SNAKE_CASE : Union[str, Any] = pad_token_id @classmethod def UpperCAmelCase__ ( cls : int , _A : Union[str, os.PathLike] , **_A : Union[str, Any] ): """simple docstring""" cls._set_token_in_kwargs(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = cls.get_config_dict(_A , **_A ) # get the text config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": __SCREAMING_SNAKE_CASE : str = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_A , **_A ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''align_vision_model''' def __init__( self : Dict , _A : int = 3 , _A : int = 600 , _A : float = 2.0 , _A : float = 3.1 , _A : int = 8 , _A : List[int] = [3, 3, 5, 3, 5, 5, 3] , _A : List[int] = [32, 16, 24, 40, 80, 112, 192] , _A : List[int] = [16, 24, 40, 80, 112, 192, 320] , _A : List[int] = [] , _A : List[int] = [1, 2, 2, 2, 1, 2, 1] , _A : List[int] = [1, 2, 2, 3, 3, 4, 1] , _A : List[int] = [1, 6, 6, 6, 6, 6, 6] , _A : float = 0.25 , _A : str = "swish" , _A : int = 2560 , _A : str = "mean" , _A : float = 0.02 , _A : float = 0.0_01 , _A : float = 0.99 , _A : float = 0.2 , **_A : Optional[Any] , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Optional[int] = num_channels __SCREAMING_SNAKE_CASE : Tuple = image_size __SCREAMING_SNAKE_CASE : Any = width_coefficient __SCREAMING_SNAKE_CASE : Union[str, Any] = depth_coefficient __SCREAMING_SNAKE_CASE : Tuple = depth_divisor __SCREAMING_SNAKE_CASE : Any = kernel_sizes __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = out_channels __SCREAMING_SNAKE_CASE : str = depthwise_padding __SCREAMING_SNAKE_CASE : List[Any] = strides __SCREAMING_SNAKE_CASE : List[str] = num_block_repeats __SCREAMING_SNAKE_CASE : Union[str, Any] = expand_ratios __SCREAMING_SNAKE_CASE : Union[str, Any] = squeeze_expansion_ratio __SCREAMING_SNAKE_CASE : int = hidden_act __SCREAMING_SNAKE_CASE : str = hidden_dim __SCREAMING_SNAKE_CASE : Union[str, Any] = pooling_type __SCREAMING_SNAKE_CASE : Any = initializer_range __SCREAMING_SNAKE_CASE : int = batch_norm_eps __SCREAMING_SNAKE_CASE : Optional[Any] = batch_norm_momentum __SCREAMING_SNAKE_CASE : Optional[int] = drop_connect_rate __SCREAMING_SNAKE_CASE : Optional[Any] = sum(_A ) * 4 @classmethod def UpperCAmelCase__ ( cls : Any , _A : Union[str, os.PathLike] , **_A : Optional[Any] ): """simple docstring""" cls._set_token_in_kwargs(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = cls.get_config_dict(_A , **_A ) # get the vision config dict if we are loading from AlignConfig if config_dict.get('''model_type''' ) == "align": __SCREAMING_SNAKE_CASE : List[Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_A , **_A ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''align''' lowerCAmelCase_ = True def __init__( self : str , _A : Optional[int]=None , _A : Optional[int]=None , _A : Dict=640 , _A : int=1.0 , _A : Any=0.02 , **_A : int , ): """simple docstring""" super().__init__(**_A ) if text_config is None: __SCREAMING_SNAKE_CASE : Dict = {} logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' ) if vision_config is None: __SCREAMING_SNAKE_CASE : Any = {} logger.info('''vision_config is None. 
Initializing the AlignVisionConfig with default values.''' ) __SCREAMING_SNAKE_CASE : Tuple = AlignTextConfig(**_A ) __SCREAMING_SNAKE_CASE : int = AlignVisionConfig(**_A ) __SCREAMING_SNAKE_CASE : Optional[int] = projection_dim __SCREAMING_SNAKE_CASE : str = temperature_init_value __SCREAMING_SNAKE_CASE : Any = initializer_range @classmethod def UpperCAmelCase__ ( cls : Dict , _A : AlignTextConfig , _A : AlignVisionConfig , **_A : str ): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.text_config.to_dict() __SCREAMING_SNAKE_CASE : Optional[int] = self.vision_config.to_dict() __SCREAMING_SNAKE_CASE : int = self.__class__.model_type return output
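# A hedged sketch of composing the three configuration classes above. Their names are
# anonymized in the listing, but the type hints and model_type strings identify them as
# AlignTextConfig, AlignVisionConfig and AlignConfig; the classmethod that takes a text
# and a vision config is assumed to be from_text_vision_configs, as in transformers.
from transformers import AlignConfig, AlignTextConfig, AlignVisionConfig

text_config = AlignTextConfig(vocab_size=30522, hidden_size=768)
vision_config = AlignVisionConfig(image_size=600, num_channels=3)
config = AlignConfig.from_text_vision_configs(text_config, vision_config, projection_dim=640)
print(config.projection_dim, config.text_config.hidden_size)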
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
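# A minimal usage sketch of the file-lock API defined above. `FileLock` is the
# platform-appropriate alias chosen at the bottom of the file; the lock-file
# name is illustrative. Acquisition nests: the internal counter means the OS
# lock is only released when the outermost context exits (or release(force=True)
# is called), and acquire() raises Timeout if the wait exceeds `timeout` seconds.
lock = FileLock("shared-resource.lock", timeout=5)

with lock:  # blocks up to 5 seconds, then raises Timeout
    assert lock.is_locked
    with lock:  # re-entrant: only bumps the internal counter
        assert lock.is_locked
    assert lock.is_locked  # still held by the outer context
assert not lock.is_locked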
from math import factorial


def combinations(n: int, k: int) -> int:
    """Return n choose k, the number of ways to pick k items from n."""
    # If either condition holds, we would need the factorial of a negative
    # number, which is undefined.
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
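# A quick cross-check of the implementation above against the standard
# library: math.comb computes the same binomial coefficient (Python 3.8+).
import math

for n, k in [(52, 5), (40, 4), (10, 3)]:
    assert combinations(n, k) == math.comb(n, k)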
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} 
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
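# A minimal usage sketch, assuming the `transformers` MarkupLMFeatureExtractor
# API that this file mirrors (requires `beautifulsoup4`); the HTML snippet is
# illustrative. The call returns the text nodes and one XPath per node.
from transformers import MarkupLMFeatureExtractor

feature_extractor = MarkupLMFeatureExtractor()
html_string = "<html><body><h1>Title</h1><p>Hello world</p></body></html>"
encoding = feature_extractor(html_string)
print(encoding["nodes"])   # e.g. [['Title', 'Hello world']]
print(encoding["xpaths"])  # e.g. [['/html/body/h1', '/html/body/p']]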
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        # Deprecation shim: keep old imports working while steering users to
        # the new image-processor class.
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
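# Illustrative invocations of the conversion script above. The script filename
# is an assumption; the argument names come from the argparse block at the end
# of the file:
#
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub
#
#   # Omit --model_name to convert every LeViT variant in names_to_config:
#   python convert_levit_timm_to_pytorch.py --no-push_to_hub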
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf lowercase_ = logging.get_logger(__name__) @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = [ '''no_inference''', '''no_cuda''', '''no_tpu''', '''no_speed''', '''no_memory''', '''no_env_print''', '''no_multi_process''', ] def __init__( self : Union[str, Any] , **_A : List[str] ): """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __SCREAMING_SNAKE_CASE : str = deprecated_arg[3:] __SCREAMING_SNAKE_CASE : Optional[int] = not kwargs.pop(_A ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) __SCREAMING_SNAKE_CASE : str = kwargs.pop('''tpu_name''' , self.tpu_name ) __SCREAMING_SNAKE_CASE : List[str] = kwargs.pop('''device_idx''' , self.device_idx ) __SCREAMING_SNAKE_CASE : Dict = kwargs.pop('''eager_mode''' , self.eager_mode ) __SCREAMING_SNAKE_CASE : Dict = kwargs.pop('''use_xla''' , self.use_xla ) super().__init__(**_A ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Name of TPU'''} , ) lowerCAmelCase_ = field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Benchmark models in eager model.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" requires_backends(self , ['''tf'''] ) __SCREAMING_SNAKE_CASE : str = None if self.tpu: try: if self.tpu_name: __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __SCREAMING_SNAKE_CASE : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __SCREAMING_SNAKE_CASE : Optional[Any] = None return tpu @cached_property def UpperCAmelCase__ ( self : str ): """simple docstring""" requires_backends(self , ['''tf'''] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __SCREAMING_SNAKE_CASE : List[str] = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' ) __SCREAMING_SNAKE_CASE : List[Any] = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' ) return strategy @property def UpperCAmelCase__ ( self : Any ): """simple docstring""" requires_backends(self , ['''tf'''] ) return self._setup_tpu is not None @property def UpperCAmelCase__ ( self : Dict ): """simple docstring""" requires_backends(self , ['''tf'''] ) return self._setup_strategy @property def UpperCAmelCase__ ( self : Any ): """simple docstring""" requires_backends(self , ['''tf'''] ) return tf.config.list_physical_devices('''GPU''' ) @property def UpperCAmelCase__ ( self : Optional[int] ): 
"""simple docstring""" requires_backends(self , ['''tf'''] ) if self.cuda: return len(self.gpu_list ) return 0 @property def UpperCAmelCase__ ( self : Any ): """simple docstring""" return self.n_gpu > 0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase_ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from __future__ import annotations import time import numpy as np lowercase_ = [8, 5, 9, 7] lowercase_ = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] lowercase_ = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class __UpperCamelCase : """simple docstring""" def __init__( self : Union[str, Any] , _A : list[int] , _A : list[list[int]] , _A : list[list[int]] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = claim_vector __SCREAMING_SNAKE_CASE : int = allocated_resources_table __SCREAMING_SNAKE_CASE : List[Any] = maximum_claim_table def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def UpperCAmelCase__ ( self : int ): """simple docstring""" return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(_A ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" return {self.__need().index(_A ): i for i in self.__need()} def UpperCAmelCase__ ( self : Optional[Any] , **_A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.__need() __SCREAMING_SNAKE_CASE : str = self.__allocated_resources_table __SCREAMING_SNAKE_CASE : Union[str, Any] = self.__available_resources() __SCREAMING_SNAKE_CASE : Dict = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('''_''' * 50 + '''\n''' ) while need_list: __SCREAMING_SNAKE_CASE : Optional[int] = False for each_need in need_list: __SCREAMING_SNAKE_CASE : Optional[int] = True for index, need in enumerate(_A ): if need > available_resources[index]: __SCREAMING_SNAKE_CASE : Any = False break if execution: __SCREAMING_SNAKE_CASE : int = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __SCREAMING_SNAKE_CASE : List[Any] = original_need_index print(F'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(_A ) # update available/freed resources stack __SCREAMING_SNAKE_CASE : int = np.array(_A ) + np.array( alloc_resources_table[process_number] ) print( '''Updated available resource stack for processes: ''' + ''' '''.join([str(_A ) for x in available_resources] ) ) break if safe: print('''The process is in a safe state.\n''' ) else: print('''System in unsafe state. Aborting...\n''' ) break def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" print(''' ''' * 9 + '''Allocated Resource Table''' ) for item in self.__allocated_resources_table: print( F'''P{self.__allocated_resources_table.index(_A ) + 1}''' + ''' '''.join(F'''{it:>8}''' for it in item ) + '''\n''' ) print(''' ''' * 9 + '''System Resource Table''' ) for item in self.__maximum_claim_table: print( F'''P{self.__maximum_claim_table.index(_A ) + 1}''' + ''' '''.join(F'''{it:>8}''' for it in item ) + '''\n''' ) print( '''Current Usage by Active Processes: ''' + ''' '''.join(str(_A ) for x in self.__claim_vector ) ) print( '''Initial Available Resources: ''' + ''' '''.join(str(_A ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
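# A short usage sketch of the banker's-algorithm class above, wired to the
# three module-level tables it expects. In this dump all three tables and the
# class are bound to obfuscated names, so the bindings and the class name
# `BankersAlgorithm` below are the upstream ones, used here illustratively.
claim_vector = [8, 5, 9, 7]
allocated_resources_table = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
maximum_claim_table = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]

bankers = BankersAlgorithm(claim_vector, allocated_resources_table, maximum_claim_table)
bankers.main(describe=True)  # any truthy kwarg prints the tables, then a safe execution order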
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
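# Illustrative invocation of the warning-extraction script above (the script
# filename is an assumption; the run id and token are placeholders):
#
#   python extract_warnings.py --workflow_run_id 1234567890 \
#       --output_dir ./warnings_out --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning,FutureWarning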
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""", """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""", """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""", } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''owlvit_text_model''' def __init__( self : Dict , _A : int=4_9408 , _A : int=512 , _A : Optional[int]=2048 , _A : List[str]=12 , _A : Any=8 , _A : str=16 , _A : Union[str, Any]="quick_gelu" , _A : Optional[Any]=1e-5 , _A : Tuple=0.0 , _A : Tuple=0.02 , _A : Tuple=1.0 , _A : Union[str, Any]=0 , _A : Tuple=4_9406 , _A : Any=4_9407 , **_A : Tuple , ): """simple docstring""" super().__init__(pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A ) __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_size __SCREAMING_SNAKE_CASE : Dict = intermediate_size __SCREAMING_SNAKE_CASE : int = num_hidden_layers __SCREAMING_SNAKE_CASE : List[str] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings __SCREAMING_SNAKE_CASE : int = hidden_act __SCREAMING_SNAKE_CASE : int = layer_norm_eps __SCREAMING_SNAKE_CASE : Tuple = attention_dropout __SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range __SCREAMING_SNAKE_CASE : Tuple = initializer_factor @classmethod def UpperCAmelCase__ ( cls : Optional[int] , _A : Union[str, os.PathLike] , **_A : Tuple ): """simple docstring""" cls._set_token_in_kwargs(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = cls.get_config_dict(_A , **_A ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": __SCREAMING_SNAKE_CASE : Optional[int] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_A , **_A ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''owlvit_vision_model''' def __init__( self : List[str] , _A : Optional[Any]=768 , _A : List[Any]=3072 , _A : Union[str, Any]=12 , _A : Dict=12 , _A : Tuple=3 , _A : Any=768 , _A : Optional[Any]=32 , _A : List[str]="quick_gelu" , _A : Optional[int]=1e-5 , _A : Union[str, Any]=0.0 , _A : Tuple=0.02 , _A : Union[str, Any]=1.0 , **_A : Any , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : int = hidden_size __SCREAMING_SNAKE_CASE : Tuple = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : Dict = patch_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_act __SCREAMING_SNAKE_CASE : Any = layer_norm_eps __SCREAMING_SNAKE_CASE : List[str] = attention_dropout __SCREAMING_SNAKE_CASE : Dict = initializer_range __SCREAMING_SNAKE_CASE : Optional[Any] = initializer_factor @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , _A : Union[str, os.PathLike] , **_A : Optional[Any] ): """simple docstring""" cls._set_token_in_kwargs(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = cls.get_config_dict(_A , **_A ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": __SCREAMING_SNAKE_CASE : Union[str, Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_A , **_A ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''owlvit''' lowerCAmelCase_ = True def __init__( self : int , _A : Optional[Any]=None , _A : List[Any]=None , _A : Optional[Any]=512 , _A : List[str]=2.65_92 , _A : List[Any]=True , **_A : List[Any] , ): """simple docstring""" super().__init__(**_A ) if text_config is None: __SCREAMING_SNAKE_CASE : Optional[Any] = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' ) if vision_config is None: __SCREAMING_SNAKE_CASE : Union[str, Any] = {} logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' ) __SCREAMING_SNAKE_CASE : Any = OwlViTTextConfig(**_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = OwlViTVisionConfig(**_A ) __SCREAMING_SNAKE_CASE : Tuple = projection_dim __SCREAMING_SNAKE_CASE : List[str] = logit_scale_init_value __SCREAMING_SNAKE_CASE : Any = return_dict __SCREAMING_SNAKE_CASE : Optional[Any] = 1.0 @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , _A : Union[str, os.PathLike] , **_A : Optional[Any] ): """simple docstring""" cls._set_token_in_kwargs(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = cls.get_config_dict(_A , **_A ) if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(_A , **_A ) @classmethod def UpperCAmelCase__ ( cls : Any , _A : Dict , _A : Dict , **_A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = {} __SCREAMING_SNAKE_CASE : Union[str, Any] = text_config __SCREAMING_SNAKE_CASE : Optional[int] = vision_config return cls.from_dict(_A , **_A ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE : str = self.text_config.to_dict() __SCREAMING_SNAKE_CASE : Optional[int] = self.vision_config.to_dict() __SCREAMING_SNAKE_CASE : Any = self.__class__.model_type return output class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" @property def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4 def UpperCAmelCase__ ( self : List[str] , _A : "ProcessorMixin" , _A : int = -1 , _A : int = -1 , _A : Optional["TensorType"] = None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = super().generate_dummy_inputs( processor.tokenizer , batch_size=_A , seq_length=_A , framework=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = super().generate_dummy_inputs( processor.image_processor , batch_size=_A , framework=_A ) return {**text_input_dict, **image_input_dict} @property def UpperCAmelCase__ ( self : int ): """simple docstring""" return 14
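# A minimal usage sketch, assuming the `transformers` OwlViT config API this
# file mirrors. Note that, unlike some sibling models, this
# from_text_vision_configs variant takes plain dicts, so the sub-configs are
# serialized first.
from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

config = OwlViTConfig.from_text_vision_configs(
    OwlViTTextConfig().to_dict(), OwlViTVisionConfig().to_dict()
)
as_dict = config.to_dict()
assert as_dict["model_type"] == "owlvit"
assert "text_config" in as_dict and "vision_config" in as_dict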
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
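# A shape-oriented sketch of the temporal transformer above, assuming the
# `diffusers` TransformerTemporalModel API it mirrors (the exact import path
# has moved between diffusers versions); all sizes are illustrative.
import torch
from diffusers.models import TransformerTemporalModel

model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=8, in_channels=64)
# Input layout is (batch * num_frames, channels, height, width): 2 clips x 4 frames.
hidden_states = torch.randn(2 * 4, 64, 16, 16)
output = model(hidden_states, num_frames=4).sample
assert output.shape == hidden_states.shape  # the residual connection preserves shape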
from math import isclose, sqrt def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4 __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 __SCREAMING_SNAKE_CASE : str = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __SCREAMING_SNAKE_CASE : int = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a__ ( snake_case = 1.4 , snake_case = -9.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : float = first_x_coord __SCREAMING_SNAKE_CASE : float = first_y_coord __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
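# For reference, this file solves Project Euler problem 144: a laser beam
# bouncing inside the "white cell" ellipse 4x^2 + y^2 = 100, entering at
# (0.0, 10.1), first striking (1.4, -9.6), and exiting through the gap
# -0.01 <= x <= 0.01 at the top. `solution` is the upstream name of the
# obfuscated function, and 354 is the published answer (stated here from
# memory, so treat it as an assumption).
assert solution() == 354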
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
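# Illustrative invocations from the repository root, matching the comment at
# the top of the file and the error message raised above:
#
#   python utils/check_copies.py                      # report copy mismatches
#   python utils/check_copies.py --fix_and_overwrite  # rewrite files in place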
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""", """YituTech/conv-bert-medium-small""": ( """https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json""" ), """YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""", # See all ConvBERT models at https://huggingface.co/models?filter=convbert } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''convbert''' def __init__( self : List[Any] , _A : Union[str, Any]=3_0522 , _A : Dict=768 , _A : int=12 , _A : Union[str, Any]=12 , _A : int=3072 , _A : Optional[int]="gelu" , _A : Optional[Any]=0.1 , _A : Optional[Any]=0.1 , _A : Union[str, Any]=512 , _A : List[str]=2 , _A : List[Any]=0.02 , _A : Union[str, Any]=1e-12 , _A : Union[str, Any]=1 , _A : List[str]=0 , _A : Optional[Any]=2 , _A : int=768 , _A : List[str]=2 , _A : str=9 , _A : List[Any]=1 , _A : Optional[Any]=None , **_A : List[str] , ): """simple docstring""" super().__init__( pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , **_A , ) __SCREAMING_SNAKE_CASE : Dict = vocab_size __SCREAMING_SNAKE_CASE : int = hidden_size __SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers __SCREAMING_SNAKE_CASE : Any = num_attention_heads __SCREAMING_SNAKE_CASE : Tuple = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : str = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : Any = type_vocab_size __SCREAMING_SNAKE_CASE : Tuple = initializer_range __SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps __SCREAMING_SNAKE_CASE : Dict = embedding_size __SCREAMING_SNAKE_CASE : str = head_ratio __SCREAMING_SNAKE_CASE : Tuple = conv_kernel_size __SCREAMING_SNAKE_CASE : int = num_groups __SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" if self.task == "multiple-choice": __SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
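# A minimal usage sketch, assuming the `transformers` ConvBERT API this config
# mirrors; the overridden values below are illustrative.
from transformers import ConvBertConfig, ConvBertModel

config = ConvBertConfig(conv_kernel_size=9, head_ratio=2)
model = ConvBertModel(config)  # randomly initialized from the config
print(config.hidden_size, config.num_hidden_layers, config.conv_kernel_size)  # 768 12 9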
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
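The `_LazyModule` indirection above defers the heavy torch/tokenizers imports until an attribute is first accessed. A minimal sketch of the same idea using module-level `__getattr__` (PEP 562); this illustrates the mechanism and is not the library's actual implementation:

import importlib

_LAZY_ATTRS = {"LayoutLMv2Config": ".configuration_layoutlmv2"}  # attr -> submodule

def __getattr__(name):
    # Runs only when `name` is not found normally, so the submodule is
    # imported on first use and cached in sys.modules afterwards.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")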
from collections.abc import Callable

import numpy as np


def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : Optional[Any] = int(np.ceil((x_end - xa) / step_size ) )
    __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros((n + 1,) )
    __SCREAMING_SNAKE_CASE : str = ya
    __SCREAMING_SNAKE_CASE : Union[str, Any] = xa
    for k in range(snake_case ):
        __SCREAMING_SNAKE_CASE : Optional[Any] = y[k] + step_size * ode_func(snake_case , y[k] )
        __SCREAMING_SNAKE_CASE : Dict = y[k] + (
            (step_size / 2) * (ode_func(snake_case , y[k] ) + ode_func(x + step_size , snake_case ))
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
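The solver above is Heun's method (the explicit trapezoidal rule): a forward-Euler predictor followed by an averaged corrector, which gives second-order global accuracy. A self-contained check on y' = y, y(0) = 1, whose exact value at x = 1 is e (names here are illustrative, not from the file above):

import math

def heun_step(f, x, y, h):
    predictor = y + h * f(x, y)  # forward-Euler predictor
    return y + (h / 2) * (f(x, y) + f(x + h, predictor))  # trapezoidal corrector

x, y, h = 0.0, 1.0, 0.01
for _ in range(100):  # 100 steps of size 0.01 covers [0, 1]
    y = heun_step(lambda t, u: u, x, y, h)
    x += h
assert abs(y - math.e) < 1e-4  # O(h^2) global error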
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) 
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , 
add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def a__ ( snake_case ): """simple docstring""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def a__ ( snake_case ): """simple docstring""" # word like '180' or '身高' or '神' for char in word: __SCREAMING_SNAKE_CASE : Optional[Any] = ord(snake_case ) if not _is_chinese_char(snake_case ): return 0 return 1 def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = set() for token in tokens: __SCREAMING_SNAKE_CASE : Union[str, Any] = len(snake_case ) > 1 and is_chinese(snake_case ) if chinese_word: word_set.add(snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = list(snake_case ) return word_list def a__ ( snake_case , snake_case ): """simple docstring""" if not chinese_word_set: return bert_tokens __SCREAMING_SNAKE_CASE : Optional[int] = max([len(snake_case ) for w in chinese_word_set] ) __SCREAMING_SNAKE_CASE : List[Any] = bert_tokens __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = 0, len(snake_case ) while start < end: __SCREAMING_SNAKE_CASE : Tuple = True if is_chinese(bert_word[start] ): __SCREAMING_SNAKE_CASE : Union[str, Any] = min(end - start , snake_case ) for i in range(snake_case , 1 , -1 ): __SCREAMING_SNAKE_CASE : List[str] = ''''''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): __SCREAMING_SNAKE_CASE : Dict = '''##''' + bert_word[j] __SCREAMING_SNAKE_CASE : int = start + i __SCREAMING_SNAKE_CASE : Union[str, Any] = False break if single_word: start += 1 return bert_word def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = [] for i in range(0 , len(snake_case ) , 100 ): __SCREAMING_SNAKE_CASE : Dict = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=['''cws'''] ).cws __SCREAMING_SNAKE_CASE : Tuple = [get_chinese_word(snake_case ) for r in res] ltp_res.extend(snake_case ) assert len(snake_case ) == len(snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i in range(0 , len(snake_case ) , 100 ): __SCREAMING_SNAKE_CASE : Optional[Any] = bert_tokenizer(lines[i : i + 100] , add_special_tokens=snake_case , truncation=snake_case , max_length=512 ) bert_res.extend(res['''input_ids'''] ) assert len(snake_case ) == len(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for input_ids, chinese_word in zip(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : Optional[Any] = [] for id in input_ids: __SCREAMING_SNAKE_CASE : List[Any] = bert_tokenizer._convert_id_to_token(snake_case ) input_tokens.append(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = add_sub_symbol(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Dict = [] # We only save 
pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(snake_case ): if token[:2] == "##": __SCREAMING_SNAKE_CASE : int = token[2:] # save chinese tokens' pos if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ): ref_id.append(snake_case ) ref_ids.append(snake_case ) assert len(snake_case ) == len(snake_case ) return ref_ids def a__ ( snake_case ): """simple docstring""" # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f: __SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() __SCREAMING_SNAKE_CASE : Any = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' __SCREAMING_SNAKE_CASE : Optional[Any] = LTP(args.ltp ) # faster in GPU device __SCREAMING_SNAKE_CASE : Optional[int] = BertTokenizer.from_pretrained(args.bert ) __SCREAMING_SNAKE_CASE : List[Any] = prepare_ref(snake_case , snake_case , snake_case ) with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f: __SCREAMING_SNAKE_CASE : Optional[int] = [json.dumps(snake_case ) + '''\n''' for ref in ref_ids] f.writelines(snake_case ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser(description="""prepare_chinese_ref""") parser.add_argument( """--file_name""", required=False, type=str, default="""./resources/chinese-demo.txt""", help="""file need process, same as training data in lm""", ) parser.add_argument( """--ltp""", required=False, type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path""", ) parser.add_argument( """--bert""", required=False, type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""", ) parser.add_argument( """--save_path""", required=False, type=str, default="""./resources/ref.txt""", help="""path to save res""", ) lowercase_ = parser.parse_args() main(args)
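The "##" prefixes this script writes are what a whole-word-masking data collator consumes downstream: every character of a segmented Chinese word except the first is marked, so the word can be masked as one unit. A toy, self-contained illustration of that convention (the real script gets its word boundaries from LTP; this helper is purely illustrative):

def mark_subwords(chars, words):
    # Prefix the continuation characters of each known word with "##".
    out, i = list(chars), 0
    while i < len(out):
        for w in sorted(words, key=len, reverse=True):  # prefer longer words
            if "".join(chars[i : i + len(w)]) == w:
                for j in range(i + 1, i + len(w)):
                    out[j] = "##" + out[j]
                i += len(w)
                break
        else:
            i += 1
    return out

assert mark_subwords(list("身高很高"), {"身高"}) == ["身", "##高", "很", "高"]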
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


lowercase_ = logging.get_logger(__name__)


class __UpperCamelCase ( lowerCAmelCase__ ):
    """simple docstring"""

    def __init__( self : Tuple , *_A : Optional[int] , **_A : Tuple ):
        """simple docstring"""
        warnings.warn(
            '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use MobileViTImageProcessor instead.''' , _A , )
        super().__init__(*_A , **_A )
# flake8: noqa
# Lint as: python3
lowercase_ = [
    """VerificationMode""",
    """Version""",
    """disable_progress_bar""",
    """enable_progress_bar""",
    """is_progress_bar_enabled""",
    """experimental""",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase_ = 1_00_00 lowerCAmelCase_ = None lowerCAmelCase_ = None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase_ = ParquetConfig def UpperCAmelCase__ ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_A , (str, list, tuple) ): __SCREAMING_SNAKE_CASE : Tuple = data_files if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __SCREAMING_SNAKE_CASE : int = [] for split_name, files in data_files.items(): if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_A ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) ) break splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) ) return splits def UpperCAmelCase__ ( self : str , _A : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A ) except ValueError as 
e: logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' ) raise
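Outside the builder, the core of `_generate_tables` above is just a batched pyarrow read. A standalone sketch of that loop, assuming a local file path and the builder's default batch size of 10000:

import pyarrow as pa
import pyarrow.parquet as pq

def iter_parquet_tables(path, batch_size=10_000, columns=None):
    # Stream the file in record batches and re-wrap each one as a Table,
    # mirroring the yield inside _generate_tables above.
    with open(path, "rb") as f:
        parquet_file = pq.ParquetFile(f)
        for batch in parquet_file.iter_batches(batch_size=batch_size, columns=columns):
            yield pa.Table.from_batches([batch])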
import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": lowercase_ = argparse.ArgumentParser( description=( """Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned""" """ Distillation""" ) ) parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""]) parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str) parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str) parser.add_argument("""--vocab_transform""", action="""store_true""") lowercase_ = parser.parse_args() if args.model_type == "bert": lowercase_ = BertForMaskedLM.from_pretrained(args.model_name) lowercase_ = """bert""" else: raise ValueError("""args.model_type should be \"bert\".""") lowercase_ = model.state_dict() lowercase_ = {} for w in ["word_embeddings", "position_embeddings"]: lowercase_ = state_dict[f'''{prefix}.embeddings.{w}.weight'''] for w in ["weight", "bias"]: lowercase_ = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}'''] lowercase_ = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: lowercase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}''' ] lowercase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}''' ] lowercase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}''' ] lowercase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}''' ] lowercase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}''' ] lowercase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}''' ] lowercase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}''' ] lowercase_ = state_dict[ f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}''' ] std_idx += 1 lowercase_ = state_dict["""cls.predictions.decoder.weight"""] lowercase_ = state_dict["""cls.predictions.bias"""] if args.vocab_transform: for w in ["weight", "bias"]: lowercase_ = state_dict[f'''cls.predictions.transform.dense.{w}'''] lowercase_ = state_dict[f'''cls.predictions.transform.LayerNorm.{w}'''] print(f'''N layers selected for distillation: {std_idx}''') print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
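The dumped `compressed_sd` is meant to initialize a 6-layer student for distillation. A hedged loading sketch; the student config here (bert-base sized, 6 layers) is an assumption, and only the torch.load / load_state_dict pattern is the point:

import torch
from transformers import BertConfig, BertForMaskedLM

student_config = BertConfig.from_pretrained("bert-base-uncased", num_hidden_layers=6)
student = BertForMaskedLM(student_config)
compressed_sd = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth")
# strict=False because the vocab_transform keys are only present when the
# checkpoint was dumped with --vocab_transform.
student.load_state_dict(compressed_sd, strict=False)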
from math import isclose, sqrt def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaeneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4 __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 __SCREAMING_SNAKE_CASE : str = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __SCREAMING_SNAKE_CASE : int = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a__ ( snake_case = 1.4 , snake_case = -9.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : float = first_x_coord __SCREAMING_SNAKE_CASE : float = first_y_coord __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
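The reflection step above encodes "reflect the incoming line about the ellipse normal" via tan double-angle identities. A small cross-check that those identities agree with plain vector reflection about the normal direction (function names and sample gradients here are illustrative):

from math import isclose

def reflect_gradient(incoming, normal):
    # tan(2*theta - alpha) with tan(theta) = normal and tan(alpha) = incoming,
    # exactly as computed in next_point above.
    s = 2 * normal / (1 + normal * normal)             # sin(2*theta)
    c = (1 - normal * normal) / (1 + normal * normal)  # cos(2*theta)
    return (s - c * incoming) / (c + s * incoming)

def reflect_vector(incoming, normal):
    # Reflect the direction (1, incoming) about the axis (1, normal).
    dx, dy, ax, ay = 1.0, incoming, 1.0, normal
    dot, norm2 = dx * ax + dy * ay, ax * ax + ay * ay
    rx, ry = 2 * dot * ax / norm2 - dx, 2 * dot * ay / norm2 - dy
    return ry / rx

assert isclose(reflect_gradient(0.5, 2.0), reflect_vector(0.5, 2.0))  # both -5.5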
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE : List[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = { '''do_resize''': True, '''size''': {'''height''': 224, '''width''': 224}, '''do_center_crop''': True, '''crop_size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], '''do_convert_rgb''': True, } __SCREAMING_SNAKE_CASE : Dict = os.path.join(self.tmpdirname , _A ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_A , _A ) def UpperCAmelCase__ ( self : int , **_A : Any ): """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : Optional[int] , **_A : Optional[Any] ): """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : Union[str, Any] , **_A : Optional[int] ): """simple docstring""" return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __SCREAMING_SNAKE_CASE : List[str] = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer() __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = self.get_image_processor() __SCREAMING_SNAKE_CASE : Optional[int] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) processor_slow.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Optional[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_A ) __SCREAMING_SNAKE_CASE : List[str] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) processor_fast.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : List[str] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) 
self.assertIsInstance(processor_slow.tokenizer , _A ) self.assertIsInstance(processor_fast.tokenizer , _A ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _A ) self.assertIsInstance(processor_fast.image_processor , _A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : str = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' ) __SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor(do_normalize=_A ) __SCREAMING_SNAKE_CASE : str = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_A ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor() __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : List[Any] = image_processor(_A , return_tensors='''np''' ) __SCREAMING_SNAKE_CASE : List[str] = processor(images=_A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[int] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : str = '''Alexandra,T-shirt的价格是15便士。''' __SCREAMING_SNAKE_CASE : List[Any] = processor(text=_A ) __SCREAMING_SNAKE_CASE : str = tokenizer(_A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.get_image_processor() __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : int = '''Alexandra,T-shirt的价格是15便士。''' __SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : int = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_A ): processor() def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor() __SCREAMING_SNAKE_CASE : str = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __SCREAMING_SNAKE_CASE : str = 
processor.batch_decode(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.batch_decode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_image_processor() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Tuple = ChineseCLIPProcessor(tokenizer=_A , image_processor=_A ) __SCREAMING_SNAKE_CASE : int = '''Alexandra,T-shirt的价格是15便士。''' __SCREAMING_SNAKE_CASE : Dict = self.prepare_image_inputs() __SCREAMING_SNAKE_CASE : Optional[int] = processor(text=_A , images=_A ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] 
, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
import fire

from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer


def a__ ( snake_case , snake_case , **snake_case ):
    """simple docstring"""
    __SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(snake_case , **snake_case )
    __SCREAMING_SNAKE_CASE : Any = AutoModelForSeqaSeqLM.from_config(snake_case )
    model.save_pretrained(snake_case )
    AutoTokenizer.from_pretrained(snake_case ).save_pretrained(snake_case )
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
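Since fire exposes the function directly, a hedged command-line usage sketch (the script filename and arguments here are illustrative; the positional arguments map onto the function's config-name and save-directory parameters, and extra flags are forwarded as config overrides):

    python save_randomly_initialized_model.py t5-small /tmp/t5-random --d_model=64

This writes an untrained (randomly initialized) model plus its tokenizer files to /tmp/t5-random.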
def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )] # initialize interval's left pointer and right pointer __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0 for i in range(1 , len(snake_case ) ): # case when current index is inside the interval if i <= right_pointer: __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __SCREAMING_SNAKE_CASE : Dict = min_edge while go_next(snake_case , snake_case , snake_case ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1 return z_result def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]] def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(snake_case ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
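A quick cross-check sketch for the Z-based matcher above: count overlapping occurrences with a naive scan (note that str.count only counts non-overlapping matches, so it is not a valid oracle here):

def naive_overlapping_count(pattern, text):
    return sum(text.startswith(pattern, i) for i in range(len(text)))

assert naive_overlapping_count("aba", "abababa") == 3
# The matcher above should agree: each value >= len(pattern) in
# z_function(pattern + input_str) is counted as one match.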
def a__ ( snake_case ):
    """simple docstring"""
    return "".join([hex(snake_case )[2:].zfill(2 ).upper() for byte in list(snake_case )] )


def a__ ( snake_case ):
    """simple docstring"""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(snake_case ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(snake_case ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(snake_case ) , 2 ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
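A round-trip sanity sketch for the Base16 helpers above, cross-checked against the standard library (base64.b16encode / b16decode implement the same uppercase RFC 3548 alphabet):

import base64

data = b"Hello World!"
encoded = base64.b16encode(data).decode("ascii")  # '48656C6C6F20576F726C6421'
assert base64.b16decode(encoded) == data
# The encode/decode pair defined above should produce and accept the
# same uppercase-hex string.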
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): lowercase_ = """pt""" elif is_tf_available(): lowercase_ = """tf""" else: lowercase_ = """jax""" class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = PerceiverTokenizer lowerCAmelCase_ = False def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : Any = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCAmelCase__ ( self : str ): """simple docstring""" return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' ) def UpperCAmelCase__ ( self : int , **_A : Any ): """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A ) def UpperCAmelCase__ ( self : Optional[Any] , _A : Any , _A : Union[str, Any]=False , _A : str=20 , _A : int=5 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = [] for i in range(len(_A ) ): try: __SCREAMING_SNAKE_CASE : Any = tokenizer.decode([i] , clean_up_tokenization_spaces=_A ) except UnicodeDecodeError: pass toks.append((i, tok) ) __SCREAMING_SNAKE_CASE : int = list(filter(lambda _A : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , _A ) ) __SCREAMING_SNAKE_CASE : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) ) if max_length is not None and len(_A ) > max_length: __SCREAMING_SNAKE_CASE : List[str] = toks[:max_length] if min_length is not None and len(_A ) < min_length and len(_A ) > 0: while len(_A ) < min_length: __SCREAMING_SNAKE_CASE : Optional[int] = toks + toks # toks_str = [t[1] for t in toks] __SCREAMING_SNAKE_CASE : List[str] = [t[0] for t in toks] # Ensure consistency __SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A ) if " " not in output_txt and len(_A ) > 1: __SCREAMING_SNAKE_CASE : int = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A ) + ''' ''' + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A ) ) if with_prefix_space: __SCREAMING_SNAKE_CASE : Optional[int] = ''' ''' + output_txt __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.encode(_A , add_special_tokens=_A ) return output_txt, output_ids def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.perceiver_tokenizer __SCREAMING_SNAKE_CASE : Optional[Any] = '''Unicode €.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer(_A ) __SCREAMING_SNAKE_CASE : Tuple = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded['''input_ids'''] , _A ) # decoding __SCREAMING_SNAKE_CASE : List[Any] = tokenizer.decode(_A ) self.assertEqual(_A , '''[CLS]Unicode €.[SEP]''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer('''e è é ê ë''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded['''input_ids'''] , _A ) # decoding __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.decode(_A ) self.assertEqual(_A , '''[CLS]e è é ê ë[SEP]''' ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' ) def 
UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.perceiver_tokenizer __SCREAMING_SNAKE_CASE : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] # fmt: off __SCREAMING_SNAKE_CASE : List[str] = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on __SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , padding=_A , return_tensors=_A ) self.assertIsInstance(_A , _A ) if FRAMEWORK != "jax": __SCREAMING_SNAKE_CASE : List[str] = list(batch.input_ids.numpy()[0] ) else: __SCREAMING_SNAKE_CASE : Optional[int] = list(batch.input_ids.tolist()[0] ) self.assertListEqual(_A , _A ) self.assertEqual((2, 38) , batch.input_ids.shape ) self.assertEqual((2, 38) , batch.attention_mask.shape ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.perceiver_tokenizer __SCREAMING_SNAKE_CASE : Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(_A , padding=_A , return_tensors=_A ) # check if input_ids are returned and no decoder_input_ids self.assertIn('''input_ids''' , _A ) self.assertIn('''attention_mask''' , _A ) self.assertNotIn('''decoder_input_ids''' , _A ) self.assertNotIn('''decoder_attention_mask''' , _A ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.perceiver_tokenizer __SCREAMING_SNAKE_CASE : Tuple = [ '''Summary of the text.''', '''Another summary.''', ] __SCREAMING_SNAKE_CASE : Any = tokenizer( text_target=_A , max_length=32 , padding='''max_length''' , truncation=_A , return_tensors=_A ) self.assertEqual(32 , targets['''input_ids'''].shape[1] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE : Dict = ''' He is very happy, UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.__class__.from_pretrained(_A ) __SCREAMING_SNAKE_CASE : List[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) shutil.rmtree(_A ) __SCREAMING_SNAKE_CASE : str = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc __SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE : List[Any] = ''' He is very happy, UNwant\u00E9d,running''' tokenizer.add_tokens(['''bim''', '''bambam'''] ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.additional_special_tokens additional_special_tokens.append('''new_additional_special_token''' ) tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} ) __SCREAMING_SNAKE_CASE 
: Any = tokenizer.encode(_A , add_special_tokens=_A ) tokenizer.save_pretrained(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.__class__.from_pretrained(_A ) __SCREAMING_SNAKE_CASE : List[Any] = after_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.__class__.from_pretrained(_A , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(_A ) with open(os.path.join(_A , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file: __SCREAMING_SNAKE_CASE : Any = json.load(_A ) with open(os.path.join(_A , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file: __SCREAMING_SNAKE_CASE : Dict = json.load(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = [F'''<extra_id_{i}>''' for i in range(125 )] __SCREAMING_SNAKE_CASE : Optional[Any] = added_tokens_extra_ids + [ '''an_additional_special_token''' ] __SCREAMING_SNAKE_CASE : Any = added_tokens_extra_ids + [ '''an_additional_special_token''' ] with open(os.path.join(_A , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_A , _A ) with open(os.path.join(_A , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile: json.dump(_A , _A ) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_class.from_pretrained( _A , ) self.assertIn( '''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained __SCREAMING_SNAKE_CASE : str = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=_A )] __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_class.from_pretrained( _A , additional_special_tokens=_A , ) self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens ) self.assertEqual( ['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178] ) , '''�''' ) def UpperCAmelCase__ ( self : str ): """simple docstring""" pass def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" pass def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" pass def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.get_tokenizers(fast=_A , do_lower_case=_A ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[CLS]''', '''t''', '''h''', '''i''', '''s''', ''' ''', '''i''', '''s''', ''' ''', '''a''', ''' ''', '''t''', '''e''', '''s''', '''t''', '''[SEP]'''] __SCREAMING_SNAKE_CASE : int = tokenizer.convert_tokens_to_string(_A ) self.assertIsInstance(_A , _A )
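A hedged usage sketch of the byte-level tokenizer these tests exercise, through its public transformers name PerceiverTokenizer; the deepmind/language-perceiver checkpoint, Hub access, and an installed torch are assumptions, not part of the test code above.

from transformers import PerceiverTokenizer

# Assumption: the public "deepmind/language-perceiver" checkpoint is reachable (Hub or local cache).
tokenizer = PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
batch = tokenizer(
    ["A long paragraph for summarization.", "Another paragraph for summarization."],
    padding=True,
    return_tensors="pt",
)
# The tests above assert a padded shape of (2, 38) for exactly these two sentences.
print(batch.input_ids.shape, batch.attention_mask.shape)
print(tokenizer.decode(batch.input_ids[0], skip_special_tokens=True))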
74
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
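A brief, hedged sketch of consuming the output of the conversion script above; the local dump folder name and the published microsoft/xclip-base-patch32 processor are placeholders and assumptions, not something the script itself guarantees.

from transformers import XCLIPModel, XCLIPProcessor

# Folder written via --pytorch_dump_folder_path (the name here is illustrative only).
model = XCLIPModel.from_pretrained("./xclip-base-patch32-hf")
# The script only saves the model locally, so the processor is loaded from a published checkpoint here.
processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")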
74
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowercase_ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
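A short sketch of what the lazy import structure above ultimately exposes: instantiating a small, randomly initialised FNet model locally, assuming only that torch is installed.

from transformers import FNetConfig, FNetModel

# Tiny illustrative hyperparameters; any values accepted by FNetConfig work here.
config = FNetConfig(num_hidden_layers=2, hidden_size=64, intermediate_size=128)
model = FNetModel(config)
print(type(model).__name__, config.model_type)  # FNetModel fnet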
74
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to dest_dir, keeping the file names."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
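A hypothetical invocation of the helper above on a throwaway directory; fire exposes the same call on the command line (for example, python minify.py src_dir dest_dir 100).

import tempfile
from pathlib import Path

src = Path(tempfile.mkdtemp())
(src / "sample.txt").write_text("\n".join(f"line {i}" for i in range(1_000)))
minify(str(src), str(src) + "_tiny", 100)  # keeps only the first 100 lines of each file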
74
1
import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } lowercase_ = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } lowercase_ = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = set() __SCREAMING_SNAKE_CASE : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __SCREAMING_SNAKE_CASE : int = char __SCREAMING_SNAKE_CASE : str = set(snake_case ) return pairs class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = VOCAB_FILES_NAMES lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[str] , _A : Tuple , _A : Optional[Any] , _A : str="<s>" , _A : Any="</s>" , _A : List[str]="</s>" , _A : str="<s>" , _A : Optional[int]="<unk>" , _A : Any="<pad>" , _A : Dict="<mask>" , **_A : Tuple , ): """simple docstring""" super().__init__( bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , **_A , ) __SCREAMING_SNAKE_CASE : str = vocab_file __SCREAMING_SNAKE_CASE : List[Any] = merges_file __SCREAMING_SNAKE_CASE : List[str] = {} __SCREAMING_SNAKE_CASE : Optional[int] = 0 __SCREAMING_SNAKE_CASE : List[str] = 1 __SCREAMING_SNAKE_CASE : Optional[Any] = 2 __SCREAMING_SNAKE_CASE : Union[str, Any] = 3 self.add_from_file(_A ) __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.encoder.items()} with open(_A , encoding='''utf-8''' ) as merges_handle: __SCREAMING_SNAKE_CASE : Optional[Any] = merges_handle.read().split('''\n''' )[:-1] __SCREAMING_SNAKE_CASE : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] __SCREAMING_SNAKE_CASE : Tuple = dict(zip(_A , range(len(_A ) ) ) ) __SCREAMING_SNAKE_CASE : List[Any] = {} def UpperCAmelCase__ ( self : List[str] , _A : List[int] , _A : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __SCREAMING_SNAKE_CASE : Any = [self.cls_token_id] __SCREAMING_SNAKE_CASE : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase__ ( self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is None: return [1] + ([0] * len(_A )) + [1] return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1] def UpperCAmelCase__ ( self : List[Any] , _A : List[int] , _A : Optional[List[int]] = None ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [self.sep_token_id] __SCREAMING_SNAKE_CASE : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + 
sep + sep + token_ids_a + sep ) * [0] @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return len(self.encoder ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase__ ( self : List[str] , _A : List[Any] ): """simple docstring""" if token in self.cache: return self.cache[token] __SCREAMING_SNAKE_CASE : int = tuple(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __SCREAMING_SNAKE_CASE : Optional[int] = get_pairs(_A ) if not pairs: return token while True: __SCREAMING_SNAKE_CASE : Optional[Any] = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = bigram __SCREAMING_SNAKE_CASE : List[str] = [] __SCREAMING_SNAKE_CASE : List[str] = 0 while i < len(_A ): try: __SCREAMING_SNAKE_CASE : Union[str, Any] = word.index(_A , _A ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __SCREAMING_SNAKE_CASE : Dict = j if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __SCREAMING_SNAKE_CASE : List[str] = tuple(_A ) __SCREAMING_SNAKE_CASE : List[Any] = new_word if len(_A ) == 1: break else: __SCREAMING_SNAKE_CASE : Tuple = get_pairs(_A ) __SCREAMING_SNAKE_CASE : Tuple = '''@@ '''.join(_A ) __SCREAMING_SNAKE_CASE : Tuple = word[:-4] __SCREAMING_SNAKE_CASE : Optional[int] = word return word def UpperCAmelCase__ ( self : str , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Optional[int] = re.findall(r'''\S+\n?''' , _A ) for token in words: split_tokens.extend(list(self.bpe(_A ).split(''' ''' ) ) ) return split_tokens def UpperCAmelCase__ ( self : List[str] , _A : Optional[Any] ): """simple docstring""" return self.encoder.get(_A , self.encoder.get(self.unk_token ) ) def UpperCAmelCase__ ( self : Dict , _A : int ): """simple docstring""" return self.decoder.get(_A , self.unk_token ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ''' '''.join(_A ).replace('''@@ ''' , '''''' ).strip() return out_string def UpperCAmelCase__ ( self : Dict , _A : str , _A : Optional[str] = None ): """simple docstring""" if not os.path.isdir(_A ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __SCREAMING_SNAKE_CASE : Tuple = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __SCREAMING_SNAKE_CASE : Tuple = os.path.join( _A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ): copyfile(self.vocab_file , _A ) if os.path.abspath(self.merges_file ) != os.path.abspath(_A ): copyfile(self.merges_file , _A ) return out_vocab_file, out_merge_file def UpperCAmelCase__ ( self : Optional[Any] , _A : str ): """simple docstring""" if isinstance(_A , _A ): try: with open(_A , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(_A ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(F'''Incorrect encoding detected in {f}, please rebuild the dataset''' ) return __SCREAMING_SNAKE_CASE : Any = f.readlines() for lineTmp in lines: __SCREAMING_SNAKE_CASE : List[Any] = 
lineTmp.strip() __SCREAMING_SNAKE_CASE : Tuple = line.rfind(''' ''' ) if idx == -1: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt>\'''' ) __SCREAMING_SNAKE_CASE : Tuple = line[:idx] __SCREAMING_SNAKE_CASE : Optional[int] = len(self.encoder )
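A hedged usage sketch of the BPE tokenizer defined above via its public transformers name PhobertTokenizer; the vinai/phobert-base checkpoint, Hub access, and a pre-word-segmented input sentence are assumptions.

from transformers import PhobertTokenizer

tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
line = "Tôi là sinh_viên trường đại_học Công_nghệ ."  # PhoBERT expects word-segmented Vietnamese input
encoding = tokenizer(line)
print(tokenizer.convert_ids_to_tokens(encoding["input_ids"]))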
74
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
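A hedged sketch of how DisjunctiveConstraint is normally consumed outside the unit tests above: passed to constrained beam search so that at least one of the candidate token sequences must appear in the output. The gpt2 checkpoint and the prompt are assumptions.

from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Force either " rain" or " raining" to appear in the generated continuation.
constraint = DisjunctiveConstraint(
    [
        tokenizer(" rain", add_special_tokens=False).input_ids,
        tokenizer(" raining", add_special_tokens=False).input_ids,
    ]
)
inputs = tokenizer("The weather today is", return_tensors="pt")
output = model.generate(**inputs, constraints=[constraint], num_beams=4, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))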
74
1
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"Job {i:>2} is {job[0]} at {job[1]}")
74
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
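A hedged sketch of the intended behaviour of the mask generator defined in the script above, assuming it is bound to the name MaskGenerator as the training setup in the script expects; the numbers below follow directly from the constructor arithmetic.

# 192/32 = 6, so there is a 6x6 grid of coarse patches (36 tokens); ceil(0.6 * 36) = 22 are masked.
# Each coarse patch covers an 8x8 block of 4-pixel model patches, so the flattened mask has
# 48 * 48 = 2304 entries, of which 22 * 64 = 1408 are set to 1.
generator = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = generator()
print(mask.shape, int(mask.sum()))  # torch.Size([2304]) 1408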
74
1
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
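A small usage sketch of the helper above; the repository id and file path are placeholders, and the rendered URL assumes the current huggingface_hub URL scheme.

print(hf_hub_url("user/my_dataset", "data/train-00000-of-00001.parquet"))
# e.g. https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train-00000-of-00001.parquet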
74
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''data2vec-vision''' def __init__( self : Optional[int] , _A : List[Any]=768 , _A : Any=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.0 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Any=1e-12 , _A : Optional[Any]=224 , _A : Union[str, Any]=16 , _A : Tuple=3 , _A : List[Any]=False , _A : List[str]=False , _A : Dict=False , _A : Dict=False , _A : Any=0.1 , _A : List[str]=0.1 , _A : Dict=True , _A : Dict=[3, 5, 7, 11] , _A : Union[str, Any]=[1, 2, 3, 6] , _A : Optional[Any]=True , _A : Any=0.4 , _A : List[str]=256 , _A : Any=1 , _A : Any=False , _A : Union[str, Any]=255 , **_A : Tuple , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : List[str] = use_mask_token __SCREAMING_SNAKE_CASE : List[Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE : Dict = use_relative_position_bias __SCREAMING_SNAKE_CASE : str = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_scale_init_value __SCREAMING_SNAKE_CASE : str = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : str = out_indices __SCREAMING_SNAKE_CASE : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : Tuple = use_auxiliary_head __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels __SCREAMING_SNAKE_CASE : List[Any] = auxiliary_num_convs __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_concat_input __SCREAMING_SNAKE_CASE : Any = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4
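A brief sketch of the configuration above through its public name Data2VecVisionConfig; the values are arbitrary illustrations.

from transformers import Data2VecVisionConfig

config = Data2VecVisionConfig(image_size=224, patch_size=16, use_mean_pooling=True, out_indices=[3, 5, 7, 11])
print(config.model_type, config.num_hidden_layers)  # data2vec-vision 12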
74
1
from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class __UpperCamelCase : """simple docstring""" def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Any ): """simple docstring""" raise NotImplementedError() class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Any , _A : "AutoTokenizer" , _A : bool = False , **_A : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = tokenizer __SCREAMING_SNAKE_CASE : Optional[Any] = skip_prompt __SCREAMING_SNAKE_CASE : Optional[Any] = decode_kwargs # variables used in the streaming process __SCREAMING_SNAKE_CASE : Union[str, Any] = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 __SCREAMING_SNAKE_CASE : Union[str, Any] = True def UpperCAmelCase__ ( self : List[Any] , _A : str ): """simple docstring""" if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('''TextStreamer only supports batch size 1''' ) elif len(value.shape ) > 1: __SCREAMING_SNAKE_CASE : Union[str, Any] = value[0] if self.skip_prompt and self.next_tokens_are_prompt: __SCREAMING_SNAKE_CASE : int = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('''\n''' ): __SCREAMING_SNAKE_CASE : Any = text[self.print_len :] __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 # If the last token is a CJK character, we print the characters. elif len(_A ) > 0 and self._is_chinese_char(ord(text[-1] ) ): __SCREAMING_SNAKE_CASE : Dict = text[self.print_len :] self.print_len += len(_A ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: __SCREAMING_SNAKE_CASE : Union[str, Any] = text[self.print_len : text.rfind(''' ''' ) + 1] self.print_len += len(_A ) self.on_finalized_text(_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" if len(self.token_cache ) > 0: __SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.decode(self.token_cache , **self.decode_kwargs ) __SCREAMING_SNAKE_CASE : Dict = text[self.print_len :] __SCREAMING_SNAKE_CASE : Union[str, Any] = [] __SCREAMING_SNAKE_CASE : int = 0 else: __SCREAMING_SNAKE_CASE : Dict = '''''' __SCREAMING_SNAKE_CASE : List[Any] = True self.on_finalized_text(_A , stream_end=_A ) def UpperCAmelCase__ ( self : Any , _A : str , _A : bool = False ): """simple docstring""" print(_A , flush=_A , end='''''' if not stream_end else None ) def UpperCAmelCase__ ( self : str , _A : List[Any] ): """simple docstring""" if ( (cp >= 0x4E_00 and cp <= 0x9F_FF) or (cp >= 0x34_00 and cp <= 0x4D_BF) # or (cp >= 0x2_00_00 and cp <= 0x2_A6_DF) # or (cp >= 0x2_A7_00 and cp <= 0x2_B7_3F) # or (cp >= 0x2_B7_40 and cp <= 0x2_B8_1F) # or (cp >= 0x2_B8_20 and cp <= 0x2_CE_AF) # or (cp >= 0xF9_00 and cp <= 0xFA_FF) or (cp >= 0x2_F8_00 and cp <= 0x2_FA_1F) # ): # return True return False class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , _A : "AutoTokenizer" , _A : bool = False , _A : Optional[float] = None , **_A : Optional[Any] ): """simple docstring""" super().__init__(_A , _A , **_A ) __SCREAMING_SNAKE_CASE : int = Queue() __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : int = timeout def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : bool = False ): """simple docstring""" self.text_queue.put(_A , timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal , timeout=self.timeout ) def __iter__( self : str ): """simple docstring""" return self def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
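A hedged usage sketch of the streamers above via their public transformers names; the gpt2 checkpoint, Hub access, and an installed torch are assumptions.

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("A short story:", return_tensors="pt")

# TextIteratorStreamer queues decoded text so generation can run in a background thread.
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 30}).start()
for piece in streamer:
    print(piece, end="", flush=True)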
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask __SCREAMING_SNAKE_CASE : str = use_token_type_ids __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : int = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , 
hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.num_choices __SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), 
(__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase_ = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A ) __SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace( _A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A ) loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Any = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
def solution(limit: int = 1_000_000) -> int:
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 means a > d; n > 0 means 4d > a
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count


if __name__ == "__main__":
    print(f"{solution() = }")
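# Why the loop above works (a short derivation, kept as comments so the module stays importable):
# write the three consecutive terms of the arithmetic progression as x = a + d, y = a, z = a - d,
# with a = first_term. Then
#     n = x**2 - y**2 - z**2 = (a + d)**2 - a**2 - (a - d)**2 = 4*a*d - a**2 = a * (4*d - a),
# so a must divide n, and d = (n / a + a) / 4 -- which is exactly the "divisible by 4" check.
# Requiring z > 0 gives a > d, and requiring n > 0 gives a < 4*d: the two inequalities tested above.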
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
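# Usage sketch (an assumption: the platform-specific class selected at the bottom of this module
# plays the role of `FileLock` in the upstream filelock package this code mirrors):
#
#     lock = FileLock("high_ground.txt.lock", timeout=10)
#     with lock:  # blocks until acquired; raises Timeout if 10 seconds elapse first
#         with open("high_ground.txt", "a") as f:
#             f.write("hello")
#
#     # The lock counter makes acquisition reentrant: nested `with lock:` blocks on the same
#     # object only release the OS-level lock when the outermost block exits.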
lowercase_ = """Input must be a string of 8 numbers plus letter""" lowercase_ = """TRWAGMYFPDXBNJZSQVHLCKE""" def a__ ( snake_case ): """simple docstring""" if not isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = F'''Expected string as input, found {type(snake_case ).__name__}''' raise TypeError(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = spanish_id.replace('''-''' , '''''' ).upper() if len(snake_case ) != 9: raise ValueError(snake_case ) try: __SCREAMING_SNAKE_CASE : Optional[Any] = int(spanish_id_clean[0:8] ) __SCREAMING_SNAKE_CASE : Tuple = spanish_id_clean[8] except ValueError as ex: raise ValueError(snake_case ) from ex if letter.isdigit(): raise ValueError(snake_case ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} 
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
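# Usage sketch (assumption: this class is transformers' MarkupLMFeatureExtractor, which turns
# raw HTML into text nodes plus the xpath of each node):
#
#     feature_extractor = MarkupLMFeatureExtractor()
#     page = "<html><body><h1>Page Title</h1><p>My first paragraph.</p></body></html>"
#     encoding = feature_extractor(page)
#     print(encoding["nodes"])   # [['Page Title', 'My first paragraph.']]
#     print(encoding["xpaths"])  # [['/html/body/h1', '/html/body/p']]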
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path import torch from ...utils import is_npu_available, is_xpu_available from .config_args import ClusterConfig, default_json_config_file from .config_utils import SubcommandHelpFormatter lowercase_ = """Create a default config file for Accelerate with only a few flags set.""" def a__ ( snake_case="no" , snake_case = default_json_config_file , snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = Path(snake_case ) path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) if path.exists(): print( F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' ) return False __SCREAMING_SNAKE_CASE : List[str] = mixed_precision.lower() if mixed_precision not in ["no", "fp16", "bf16", "fp8"]: raise ValueError( F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' ) __SCREAMING_SNAKE_CASE : str = { '''compute_environment''': '''LOCAL_MACHINE''', '''mixed_precision''': mixed_precision, } if torch.cuda.is_available(): __SCREAMING_SNAKE_CASE : Dict = torch.cuda.device_count() __SCREAMING_SNAKE_CASE : str = num_gpus __SCREAMING_SNAKE_CASE : List[Any] = False if num_gpus > 1: __SCREAMING_SNAKE_CASE : List[Any] = '''MULTI_GPU''' else: __SCREAMING_SNAKE_CASE : Dict = '''NO''' elif is_xpu_available() and use_xpu: __SCREAMING_SNAKE_CASE : str = torch.xpu.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_xpus __SCREAMING_SNAKE_CASE : Optional[Any] = False if num_xpus > 1: __SCREAMING_SNAKE_CASE : List[Any] = '''MULTI_XPU''' else: __SCREAMING_SNAKE_CASE : int = '''NO''' elif is_npu_available(): __SCREAMING_SNAKE_CASE : Optional[int] = torch.npu.device_count() __SCREAMING_SNAKE_CASE : Optional[Any] = num_npus __SCREAMING_SNAKE_CASE : List[str] = False if num_npus > 1: __SCREAMING_SNAKE_CASE : Optional[Any] = '''MULTI_NPU''' else: __SCREAMING_SNAKE_CASE : Any = '''NO''' else: __SCREAMING_SNAKE_CASE : List[str] = 0 __SCREAMING_SNAKE_CASE : List[str] = True __SCREAMING_SNAKE_CASE : Tuple = 1 __SCREAMING_SNAKE_CASE : Dict = '''NO''' __SCREAMING_SNAKE_CASE : Optional[Any] = ClusterConfig(**snake_case ) config.to_json_file(snake_case ) return path def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = parser.add_parser('''default''' , parents=snake_case , help=snake_case , formatter_class=snake_case ) parser.add_argument( '''--config_file''' , default=snake_case , help=( '''The path to use to store the config file. 
Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , dest='''save_location''' , ) parser.add_argument( '''--mixed_precision''' , choices=['''no''', '''fp16''', '''bf16'''] , type=snake_case , help='''Whether or not to use mixed precision training. ''' '''Choose between FP16 and BF16 (bfloat16) training. ''' '''BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.''' , default='''no''' , ) parser.set_defaults(func=snake_case ) return parser def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = write_basic_config(args.mixed_precision , args.save_location ) if config_file: print(F'''accelerate configuration saved at {config_file}''' )
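# Equivalent CLI usage (assumption: this module is wired into the `accelerate` entry point, so the
# `default` subcommand registered above is reachable as `accelerate config default`; the first
# helper above corresponds to write_basic_config upstream):
#
#     accelerate config default --mixed_precision fp16
#     accelerate config default --config_file /tmp/accelerate.json --mixed_precision bf16
#
# Both calls write a minimal ClusterConfig json, auto-detecting CUDA/XPU/NPU device counts.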
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
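# Example invocation (assumptions: the file name below, and a checkout with `timm`, `torch` and
# `transformers` installed; the flags come directly from the argparse block above):
#
#     python convert_levit_timm_to_pytorch.py \
#         --model_name levit-128S \
#         --pytorch_dump_folder_path levit-dump-folder/ \
#         --push_to_hub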
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    """Write the first n lines of each file in src_dir to a same-named file in dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
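# Example invocation through python-fire, which maps positional CLI arguments onto minify()'s
# parameters (the file name minify_dataset.py is an assumption):
#
#     python minify_dataset.py SRC_DIR DEST_DIR 128
#
# This writes the first 128 (rstripped) lines of every file in SRC_DIR into DEST_DIR.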
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
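# Effect of the _LazyModule indirection above, sketched (assumption: the standard transformers
# lazy-import behavior, where submodules load on first attribute access):
#
#     import transformers.models.falcon as falcon
#
#     cfg_cls = falcon.FalconConfig         # triggers import of configuration_falcon only
#     lm_cls = falcon.FalconForCausalLM     # imports modeling_falcon (and torch) on demand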
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowercase_ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCAmelCase_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowerCAmelCase_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowerCAmelCase_ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) __SCREAMING_SNAKE_CASE : Tuple = text_classifier('''This is great !''' , top_k=2 ) self.assertEqual( nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}] ) __SCREAMING_SNAKE_CASE : Optional[int] = text_classifier(['''This is great !''', '''This is bad'''] , top_k=2 ) self.assertEqual( nested_simplify(_A ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}], [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}], ] , ) __SCREAMING_SNAKE_CASE : str = text_classifier('''This is great !''' , top_k=1 ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) # Legacy behavior __SCREAMING_SNAKE_CASE : str = text_classifier('''This is great !''' , return_all_scores=_A ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) __SCREAMING_SNAKE_CASE : str = text_classifier('''This is great !''' , return_all_scores=_A ) self.assertEqual( nested_simplify(_A ) , [[{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}]] ) __SCREAMING_SNAKE_CASE : str = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=_A ) self.assertEqual( nested_simplify(_A ) , [ [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}], [{'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_1''', '''score''': 0.4_96}], ] , ) __SCREAMING_SNAKE_CASE : Dict = text_classifier(['''This is great !''', '''Something else'''] , return_all_scores=_A ) self.assertEqual( nested_simplify(_A ) , [ {'''label''': '''LABEL_0''', '''score''': 0.5_04}, {'''label''': '''LABEL_0''', '''score''': 0.5_04}, ] , ) @require_torch def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" import torch __SCREAMING_SNAKE_CASE : Tuple = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''pt''' , device=torch.device('''cpu''' ) , ) __SCREAMING_SNAKE_CASE : 
List[Any] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) @require_tf def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = pipeline( task='''text-classification''' , model='''hf-internal-testing/tiny-random-distilbert''' , framework='''tf''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''LABEL_0''', '''score''': 0.5_04}] ) @slow @require_torch def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = pipeline('''text-classification''' ) __SCREAMING_SNAKE_CASE : List[Any] = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) __SCREAMING_SNAKE_CASE : Dict = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) __SCREAMING_SNAKE_CASE : str = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_88}] ) @slow @require_tf def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = pipeline('''text-classification''' , framework='''tf''' ) __SCREAMING_SNAKE_CASE : Tuple = text_classifier('''This is great !''' ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''POSITIVE''', '''score''': 1.0}] ) __SCREAMING_SNAKE_CASE : Dict = text_classifier('''This is bad !''' ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''NEGATIVE''', '''score''': 1.0}] ) __SCREAMING_SNAKE_CASE : Tuple = text_classifier('''Birds are a type of animal''' ) self.assertEqual(nested_simplify(_A ) , [{'''label''': '''POSITIVE''', '''score''': 0.9_88}] ) def UpperCAmelCase__ ( self : str , _A : int , _A : Any , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = TextClassificationPipeline(model=_A , tokenizer=_A ) return text_classifier, ["HuggingFace is in", "This is another test"] def UpperCAmelCase__ ( self : List[Any] , _A : Optional[int] , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 __SCREAMING_SNAKE_CASE : Optional[Any] = '''HuggingFace is in''' __SCREAMING_SNAKE_CASE : Optional[int] = text_classifier(_A ) self.assertEqual(nested_simplify(_A ) , [{'''label''': ANY(_A ), '''score''': ANY(_A )}] ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) __SCREAMING_SNAKE_CASE : List[str] = ['''HuggingFace is in ''', '''Paris is in France'''] __SCREAMING_SNAKE_CASE : int = text_classifier(_A ) self.assertEqual( nested_simplify(_A ) , [{'''label''': ANY(_A ), '''score''': ANY(_A )}, {'''label''': ANY(_A ), '''score''': ANY(_A )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['''label'''] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format __SCREAMING_SNAKE_CASE : List[Any] = text_classifier(_A , top_k=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(_A ) , [[{'''label''': ANY(_A ), '''score''': ANY(_A )}] * N, [{'''label''': ANY(_A ), '''score''': ANY(_A )}] * N] , ) __SCREAMING_SNAKE_CASE : Tuple 
= {'''text''': '''HuggingFace is in ''', '''text_pair''': '''Paris is in France'''} __SCREAMING_SNAKE_CASE : List[Any] = text_classifier(_A ) self.assertEqual( nested_simplify(_A ) , {'''label''': ANY(_A ), '''score''': ANY(_A )} , ) self.assertTrue(outputs['''label'''] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. __SCREAMING_SNAKE_CASE : Optional[int] = [['''HuggingFace is in ''', '''Paris is in France''']] with self.assertRaises(_A ): text_classifier(_A ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility __SCREAMING_SNAKE_CASE : Any = text_classifier([[['''HuggingFace is in ''', '''Paris is in France''']]] ) self.assertEqual( nested_simplify(_A ) , [{'''label''': ANY(_A ), '''score''': ANY(_A )}] , ) self.assertTrue(outputs[0]['''label'''] in model.config.idalabel.values() )
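# Minimal standalone reproduction of what these tests exercise (assumption: network access to the
# hf-internal-testing/tiny-random-distilbert checkpoint used throughout this file):
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
#     print(classifier("This is great !", top_k=2))
#     # [{'label': 'LABEL_0', 'score': 0.504}, {'label': 'LABEL_1', 'score': 0.496}]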
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
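# Example invocation (assumptions: the file name below and a token with actions:read scope; the
# flags and their defaults mirror the argparse block above):
#
#     python extract_warnings.py \
#         --workflow_run_id 2418331981 \
#         --output_dir ./warnings \
#         --token $GITHUB_TOKEN \
#         --targets DeprecationWarning,UserWarning,FutureWarning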
from abc import ABC, abstractmethod from typing import List, Optional class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Union[str, Any] ): """simple docstring""" self.test() def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = 0 __SCREAMING_SNAKE_CASE : str = False while not completed: if counter == 1: self.reset() __SCREAMING_SNAKE_CASE : int = self.advance() if not self.does_advance(_A ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = self.update(_A ) counter += 1 if counter > 1_0000: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : Tuple , _A : int ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : Any , _A : int ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) @abstractmethod def UpperCAmelCase__ ( self : Tuple , _A : Dict=False ): """simple docstring""" raise NotImplementedError( F'''{self.__class__} is an abstract class. 
Only classes inheriting this class can be called.''' ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : int , _A : List[int] ): """simple docstring""" super(_A , self ).__init__() if not isinstance(_A , _A ) or len(_A ) == 0: raise ValueError(F'''`token_ids` has to be a non-empty list, but is {token_ids}.''' ) if any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids ): raise ValueError(F'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' ) __SCREAMING_SNAKE_CASE : Tuple = token_ids __SCREAMING_SNAKE_CASE : List[Any] = len(self.token_ids ) __SCREAMING_SNAKE_CASE : Optional[Any] = -1 # the index of the currently fulfilled step __SCREAMING_SNAKE_CASE : int = False def UpperCAmelCase__ ( self : Dict ): """simple docstring""" if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase__ ( self : Tuple , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(_A )}''' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def UpperCAmelCase__ ( self : str , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` has to be an `int`, but is {token_id} of type {type(_A )}''' ) __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : Tuple = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False if self.does_advance(_A ): self.fulfilled_idx += 1 __SCREAMING_SNAKE_CASE : int = True if self.fulfilled_idx == (self.seqlen - 1): __SCREAMING_SNAKE_CASE : Any = True __SCREAMING_SNAKE_CASE : Optional[Any] = completed else: # failed to make progress. __SCREAMING_SNAKE_CASE : Optional[int] = True self.reset() return stepped, completed, reset def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : List[Any] = 0 def UpperCAmelCase__ ( self : int ): """simple docstring""" return self.seqlen - (self.fulfilled_idx + 1) def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any]=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = PhrasalConstraint(self.token_ids ) if stateful: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.seqlen __SCREAMING_SNAKE_CASE : Optional[int] = self.fulfilled_idx __SCREAMING_SNAKE_CASE : Dict = self.completed return new_constraint class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[int] , _A : List[List[int]] , _A : Tuple=True ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = max([len(_A ) for one in nested_token_ids] ) __SCREAMING_SNAKE_CASE : List[str] = {} for token_ids in nested_token_ids: __SCREAMING_SNAKE_CASE : List[str] = root for tidx, token_id in enumerate(_A ): if token_id not in level: __SCREAMING_SNAKE_CASE : Optional[Any] = {} __SCREAMING_SNAKE_CASE : Dict = level[token_id] if no_subsets and self.has_subsets(_A , _A ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' F''' {nested_token_ids}.''' ) __SCREAMING_SNAKE_CASE : Dict = root def UpperCAmelCase__ ( self : Dict , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.trie for current_token in current_seq: __SCREAMING_SNAKE_CASE : str = start[current_token] __SCREAMING_SNAKE_CASE : Optional[Any] = list(start.keys() ) return next_tokens def UpperCAmelCase__ ( self : Tuple , _A : Dict ): """simple docstring""" 
__SCREAMING_SNAKE_CASE : List[str] = self.next_tokens(_A ) return len(_A ) == 0 def UpperCAmelCase__ ( self : int , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = list(root.values() ) if len(_A ) == 0: return 1 else: return sum([self.count_leaves(_A ) for nn in next_nodes] ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any] , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.count_leaves(_A ) return len(_A ) != leaf_count class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : str , _A : List[List[int]] ): """simple docstring""" super(_A , self ).__init__() if not isinstance(_A , _A ) or len(_A ) == 0: raise ValueError(F'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' ) if any(not isinstance(_A , _A ) for token_ids in nested_token_ids ): raise ValueError(F'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' ) if any( any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( F'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveTrie(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = nested_token_ids __SCREAMING_SNAKE_CASE : Any = self.trie.max_height __SCREAMING_SNAKE_CASE : List[Any] = [] __SCREAMING_SNAKE_CASE : Optional[int] = False def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq ) if len(_A ) == 0: return None else: return token_list def UpperCAmelCase__ ( self : Union[str, Any] , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}''' ) __SCREAMING_SNAKE_CASE : List[str] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def UpperCAmelCase__ ( self : List[str] , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}''' ) __SCREAMING_SNAKE_CASE : int = False __SCREAMING_SNAKE_CASE : Dict = False __SCREAMING_SNAKE_CASE : Union[str, Any] = False if self.does_advance(_A ): self.current_seq.append(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = True else: __SCREAMING_SNAKE_CASE : Tuple = True self.reset() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.trie.reached_leaf(self.current_seq ) __SCREAMING_SNAKE_CASE : Optional[Any] = completed return stepped, completed, reset def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = False __SCREAMING_SNAKE_CASE : Any = [] def UpperCAmelCase__ ( self : Any ): """simple docstring""" if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def UpperCAmelCase__ ( self : Dict , _A : List[str]=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(self.token_ids ) if stateful: __SCREAMING_SNAKE_CASE : Dict = self.seqlen __SCREAMING_SNAKE_CASE : Tuple = self.current_seq __SCREAMING_SNAKE_CASE : Optional[int] = self.completed return new_constraint class __UpperCamelCase : """simple docstring""" def __init__( self : Dict , _A : List[Constraint] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = constraints # max # of steps required to fulfill a given 
constraint __SCREAMING_SNAKE_CASE : Dict = max([c.seqlen for c in constraints] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = len(_A ) __SCREAMING_SNAKE_CASE : List[str] = False self.init_state() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Union[str, Any] = [constraint.copy(stateful=_A ) for constraint in self.constraints] def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __SCREAMING_SNAKE_CASE : Any = constraint.advance() if isinstance(_A , _A ): token_list.append(_A ) elif isinstance(_A , _A ): token_list.extend(_A ) else: __SCREAMING_SNAKE_CASE : Any = self.inprogress_constraint.advance() if isinstance(_A , _A ): token_list.append(_A ) elif isinstance(_A , _A ): token_list.extend(_A ) if len(_A ) == 0: return None else: return token_list def UpperCAmelCase__ ( self : int , _A : Optional[List[int]] ): """simple docstring""" self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.add(_A ) # the entire list of constraints are fulfilled if self.completed: break def UpperCAmelCase__ ( self : Optional[int] , _A : int ): """simple docstring""" if not isinstance(_A , _A ): raise ValueError(F'''`token_id` should be an `int`, but is `{token_id}`.''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = False, False if self.completed: __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : List[str] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.inprogress_constraint.update(_A ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_A ) ) __SCREAMING_SNAKE_CASE : int = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) __SCREAMING_SNAKE_CASE : Union[str, Any] = None if len(self.pending_constraints ) == 0: # we're done! __SCREAMING_SNAKE_CASE : List[str] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? 
for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_A ): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pending_constraint.update(_A ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(_A ) __SCREAMING_SNAKE_CASE : Tuple = None if not complete and stepped: __SCREAMING_SNAKE_CASE : List[str] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __SCREAMING_SNAKE_CASE : Optional[int] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __SCREAMING_SNAKE_CASE : Any = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def UpperCAmelCase__ ( self : Union[str, Any] , _A : int=True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: __SCREAMING_SNAKE_CASE : List[str] = [ constraint.copy(stateful=_A ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __SCREAMING_SNAKE_CASE : Tuple = self.inprogress_constraint.copy(stateful=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = [constraint.copy() for constraint in self.pending_constraints] return new_state
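# A minimal sketch of driving the disjunctive constraint above, assuming the
# upstream class name DisjunctiveConstraint (the name its own copy() constructs).
# The constraint below is satisfied by generating either [5, 6] or [5, 7, 8].
if __name__ == "__main__":
    constraint = DisjunctiveConstraint([[5, 6], [5, 7, 8]])
    stepped, completed, reset = constraint.update(5)  # enters the shared prefix of both branches
    assert stepped and not completed and not reset
    stepped, completed, reset = constraint.update(6)  # reaches the [5, 6] leaf of the trie
    assert completed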
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
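# A minimal forward-pass sketch for the temporal transformer above, assuming the
# upstream diffusers name TransformerTemporalModel and its keyword signature for
# the (renamed) model class. Video latents arrive flattened as
# (batch * num_frames, channels, height, width) and come back in the same shape.
if __name__ == "__main__":
    import torch

    model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=8, in_channels=32)
    latents = torch.randn(2 * 4, 32, 16, 16)  # batch=2, num_frames=4
    sample = model(latents, num_frames=4).sample  # keyword assumes the upstream forward() signature
    assert sample.shape == latents.shape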
import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch lowercase_ = True except ImportError: lowercase_ = False try: from torch.hub import _get_torch_home lowercase_ = _get_torch_home() except ImportError: lowercase_ = os.path.expanduser( os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch""")) ) lowercase_ = os.path.join(torch_cache_home, """transformers""") lowercase_ = """https://cdn.huggingface.co""" lowercase_ = """https://s3.amazonaws.com/models.huggingface.co/bert""" lowercase_ = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1]) lowercase_ = os.path.join(PATH, """config.yaml""") lowercase_ = os.path.join(PATH, """attributes.txt""") lowercase_ = os.path.join(PATH, """objects.txt""") lowercase_ = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path) lowercase_ = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE) lowercase_ = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE) lowercase_ = """pytorch_model.bin""" lowercase_ = """config.yaml""" def a__ ( snake_case=OBJECTS , snake_case=ATTRIBUTES ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [] with open(snake_case ) as f: for object in f.readlines(): vg_classes.append(object.split(''',''' )[0].lower().strip() ) __SCREAMING_SNAKE_CASE : List[str] = [] with open(snake_case ) as f: for object in f.readlines(): vg_attrs.append(object.split(''',''' )[0].lower().strip() ) return vg_classes, vg_attrs def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = OrderedDict() with open(snake_case , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Dict = pkl.load(snake_case )['''model'''] for k in copy.deepcopy(list(ckp.keys() ) ): __SCREAMING_SNAKE_CASE : int = ckp.pop(snake_case ) if isinstance(snake_case , np.ndarray ): __SCREAMING_SNAKE_CASE : List[str] = torch.tensor(snake_case ) else: assert isinstance(snake_case , torch.tensor ), type(snake_case ) __SCREAMING_SNAKE_CASE : Any = v return r class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = {} def __init__( self : Dict , _A : dict , _A : str = "root" , _A : str=0 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = name __SCREAMING_SNAKE_CASE : Dict = level __SCREAMING_SNAKE_CASE : Tuple = {} for k, v in dictionary.items(): if v is None: raise ValueError() __SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(_A ) __SCREAMING_SNAKE_CASE : Dict = copy.deepcopy(_A ) if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = Config(_A , name=_A , level=level + 1 ) __SCREAMING_SNAKE_CASE : str = v setattr(self , _A , _A ) __SCREAMING_SNAKE_CASE : Optional[int] = d def __repr__( self : List[str] ): """simple docstring""" return str(list((self._pointer.keys()) ) ) def __setattr__( self : List[str] , _A : List[str] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = val __SCREAMING_SNAKE_CASE : Optional[Any] = val __SCREAMING_SNAKE_CASE : Dict = key.split('''.''' ) __SCREAMING_SNAKE_CASE : List[str] = 
len(_A ) - 1 __SCREAMING_SNAKE_CASE : Optional[int] = self._pointer if len(_A ) > 1: for i, l in enumerate(_A ): if hasattr(self , _A ) and isinstance(getattr(self , _A ) , _A ): setattr(getattr(self , _A ) , '''.'''.join(levels[i:] ) , _A ) if l == last_level: __SCREAMING_SNAKE_CASE : List[str] = val else: __SCREAMING_SNAKE_CASE : Optional[int] = pointer[l] def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return self._pointer def UpperCAmelCase__ ( self : Any , _A : int , _A : Tuple ): """simple docstring""" with open(F'''{file_name}''' , '''w''' ) as stream: dump(_A , _A ) def UpperCAmelCase__ ( self : Optional[Any] , _A : List[Any] , _A : Tuple ): """simple docstring""" with open(F'''{file_name}''' , '''w''' ) as stream: json.dump(_A , _A ) @staticmethod def UpperCAmelCase__ ( _A : List[Any] ): """simple docstring""" with open(_A ) as stream: __SCREAMING_SNAKE_CASE : Optional[Any] = load(_A , Loader=_A ) return data def __str__( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ''' ''' if self._name != "root": __SCREAMING_SNAKE_CASE : Optional[Any] = F'''{t * (self._level-1)}{self._name}:\n''' else: __SCREAMING_SNAKE_CASE : Optional[int] = '''''' __SCREAMING_SNAKE_CASE : str = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(_A , _A ): r += F'''{t * (self._level)}{v}\n''' self._level += 1 else: r += F'''{t * (self._level)}{k}: {v} ({type(_A ).__name__})\n''' __SCREAMING_SNAKE_CASE : Optional[Any] = level return r[:-1] @classmethod def UpperCAmelCase__ ( cls : Optional[Any] , _A : str , **_A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(_A , **_A ) return cls(_A ) @classmethod def UpperCAmelCase__ ( cls : Union[str, Any] , _A : str , **_A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('''cache_dir''' , _A ) __SCREAMING_SNAKE_CASE : str = kwargs.pop('''force_download''' , _A ) __SCREAMING_SNAKE_CASE : Any = kwargs.pop('''resume_download''' , _A ) __SCREAMING_SNAKE_CASE : Tuple = kwargs.pop('''proxies''' , _A ) __SCREAMING_SNAKE_CASE : Any = kwargs.pop('''local_files_only''' , _A ) if os.path.isdir(_A ): __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(_A , _A ) elif os.path.isfile(_A ) or is_remote_url(_A ): __SCREAMING_SNAKE_CASE : Dict = pretrained_model_name_or_path else: __SCREAMING_SNAKE_CASE : List[str] = hf_bucket_url(_A , filename=_A , use_cdn=_A ) try: # Load from URL or cache if already cached __SCREAMING_SNAKE_CASE : str = cached_path( _A , cache_dir=_A , force_download=_A , proxies=_A , resume_download=_A , local_files_only=_A , ) # Load config dict if resolved_config_file is None: raise EnvironmentError __SCREAMING_SNAKE_CASE : str = Config.load_yaml(_A ) except EnvironmentError: __SCREAMING_SNAKE_CASE : Optional[int] = '''Can\'t load config for''' raise EnvironmentError(_A ) if resolved_config_file == config_file: print('''loading configuration file from path''' ) else: print('''loading configuration file cache''' ) return Config.load_yaml(_A ), kwargs def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load('''dump.pt''' , map_location=in_tensor.device ) __SCREAMING_SNAKE_CASE : int = in_tensor.numpy() __SCREAMING_SNAKE_CASE : List[Any] = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, :5] ) assert np.allclose(snake_case , snake_case , rtol=0.01 , atol=0.1 ), ( F'''{sum([1 for x in np.isclose(snake_case , 
snake_case , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %''' " element-wise mismatch" ) raise Exception('''tensors are all good''' ) # Hugging face functions below def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = urlparse(snake_case ) return parsed.scheme in ("http", "https") def a__ ( snake_case , snake_case , snake_case=True ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX __SCREAMING_SNAKE_CASE : Optional[Any] = '''/''' not in model_id if legacy_format: return F'''{endpoint}/{model_id}-{filename}''' else: return F'''{endpoint}/{model_id}/{filename}''' def a__ ( snake_case , snake_case , snake_case=None , snake_case=0 , snake_case=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = '''python/{}'''.format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(snake_case , snake_case ): ua += "; " + "; ".join('''{}/{}'''.format(snake_case , snake_case ) for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent __SCREAMING_SNAKE_CASE : Dict = {'''user-agent''': ua} if resume_size > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''bytes=%d-''' % (resume_size,) __SCREAMING_SNAKE_CASE : Union[str, Any] = requests.get(snake_case , stream=snake_case , proxies=snake_case , headers=snake_case ) if response.status_code == 416: # Range not satisfiable return __SCREAMING_SNAKE_CASE : str = response.headers.get('''Content-Length''' ) __SCREAMING_SNAKE_CASE : Dict = resume_size + int(snake_case ) if content_length is not None else None __SCREAMING_SNAKE_CASE : Dict = tqdm( unit='''B''' , unit_scale=snake_case , total=snake_case , initial=snake_case , desc='''Downloading''' , ) for chunk in response.iter_content(chunk_size=1_024 ): if chunk: # filter out keep-alive new chunks progress.update(len(snake_case ) ) temp_file.write(snake_case ) progress.close() def a__ ( snake_case , snake_case=None , snake_case=False , snake_case=None , snake_case=10 , snake_case=False , snake_case=None , snake_case=False , ): """simple docstring""" if cache_dir is None: __SCREAMING_SNAKE_CASE : Optional[int] = TRANSFORMERS_CACHE if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : Optional[Any] = str(snake_case ) os.makedirs(snake_case , exist_ok=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = None if not local_files_only: try: __SCREAMING_SNAKE_CASE : List[Any] = requests.head(snake_case , allow_redirects=snake_case , proxies=snake_case , timeout=snake_case ) if response.status_code == 200: __SCREAMING_SNAKE_CASE : str = response.headers.get('''ETag''' ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass __SCREAMING_SNAKE_CASE : Tuple = url_to_filename(snake_case , snake_case ) # get cache path to put the file __SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(snake_case , snake_case ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. 
# try to get the last downloaded one if etag is None: if os.path.exists(snake_case ): return cache_path else: __SCREAMING_SNAKE_CASE : List[str] = [ file for file in fnmatch.filter(os.listdir(snake_case ) , filename + '''.*''' ) if not file.endswith('''.json''' ) and not file.endswith('''.lock''' ) ] if len(snake_case ) > 0: return os.path.join(snake_case , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( '''Cannot find the requested files in the cached path and outgoing traffic has been''' ''' disabled. To enable model look-ups and downloads online, set \'local_files_only\'''' ''' to False.''' ) return None # From now on, etag is not None. if os.path.exists(snake_case ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. __SCREAMING_SNAKE_CASE : List[str] = cache_path + '''.lock''' with FileLock(snake_case ): # If the download just completed while the lock was activated. if os.path.exists(snake_case ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: __SCREAMING_SNAKE_CASE : Optional[int] = cache_path + '''.incomplete''' @contextmanager def _resumable_file_manager(): with open(snake_case , '''a+b''' ) as f: yield f __SCREAMING_SNAKE_CASE : Dict = _resumable_file_manager if os.path.exists(snake_case ): __SCREAMING_SNAKE_CASE : Optional[Any] = os.stat(snake_case ).st_size else: __SCREAMING_SNAKE_CASE : str = 0 else: __SCREAMING_SNAKE_CASE : Dict = partial(tempfile.NamedTemporaryFile , dir=snake_case , delete=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( '''%s not found in cache or force_download set to True, downloading to %s''' , snake_case , temp_file.name , ) http_get( snake_case , snake_case , proxies=snake_case , resume_size=snake_case , user_agent=snake_case , ) os.replace(temp_file.name , snake_case ) __SCREAMING_SNAKE_CASE : Dict = {'''url''': url, '''etag''': etag} __SCREAMING_SNAKE_CASE : List[str] = cache_path + '''.json''' with open(snake_case , '''w''' ) as meta_file: json.dump(snake_case , snake_case ) return cache_path def a__ ( snake_case , snake_case=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = url.encode('''utf-8''' ) __SCREAMING_SNAKE_CASE : Tuple = shaaaa(snake_case ) __SCREAMING_SNAKE_CASE : str = url_hash.hexdigest() if etag: __SCREAMING_SNAKE_CASE : Any = etag.encode('''utf-8''' ) __SCREAMING_SNAKE_CASE : Optional[int] = shaaaa(snake_case ) filename += "." 
+ etag_hash.hexdigest() if url.endswith('''.h5''' ): filename += ".h5" return filename def a__ ( snake_case , snake_case=None , snake_case=False , snake_case=None , snake_case=False , snake_case=None , snake_case=False , snake_case=False , snake_case=False , ): """simple docstring""" if cache_dir is None: __SCREAMING_SNAKE_CASE : Union[str, Any] = TRANSFORMERS_CACHE if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : Optional[int] = str(snake_case ) if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = str(snake_case ) if is_remote_url(snake_case ): # URL, so get it from the cache (downloading if necessary) __SCREAMING_SNAKE_CASE : Optional[Any] = get_from_cache( snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , user_agent=snake_case , local_files_only=snake_case , ) elif os.path.exists(snake_case ): # File, and it exists. __SCREAMING_SNAKE_CASE : Optional[Any] = url_or_filename elif urlparse(snake_case ).scheme == "": # File, but it doesn't exist. raise EnvironmentError('''file {} not found'''.format(snake_case ) ) else: # Something unknown raise ValueError('''unable to parse {} as a URL or as a local path'''.format(snake_case ) ) if extract_compressed_file: if not is_zipfile(snake_case ) and not tarfile.is_tarfile(snake_case ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = os.path.split(snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = output_file.replace('''.''' , '''-''' ) + '''-extracted''' __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , snake_case ) if os.path.isdir(snake_case ) and os.listdir(snake_case ) and not force_extract: return output_path_extracted # Prevent parallel extractions __SCREAMING_SNAKE_CASE : Dict = output_path + '''.lock''' with FileLock(snake_case ): shutil.rmtree(snake_case , ignore_errors=snake_case ) os.makedirs(snake_case ) if is_zipfile(snake_case ): with ZipFile(snake_case , '''r''' ) as zip_file: zip_file.extractall(snake_case ) zip_file.close() elif tarfile.is_tarfile(snake_case ): __SCREAMING_SNAKE_CASE : Optional[Any] = tarfile.open(snake_case ) tar_file.extractall(snake_case ) tar_file.close() else: raise EnvironmentError('''Archive format of {} could not be identified'''.format(snake_case ) ) return output_path_extracted return output_path def a__ ( snake_case , snake_case="," ): """simple docstring""" assert isinstance(snake_case , snake_case ) if os.path.isfile(snake_case ): with open(snake_case ) as f: __SCREAMING_SNAKE_CASE : Optional[int] = eval(f.read() ) else: __SCREAMING_SNAKE_CASE : int = requests.get(snake_case ) try: __SCREAMING_SNAKE_CASE : str = requests.json() except Exception: __SCREAMING_SNAKE_CASE : List[Any] = req.content.decode() assert data is not None, "could not connect" try: __SCREAMING_SNAKE_CASE : Union[str, Any] = eval(snake_case ) except Exception: __SCREAMING_SNAKE_CASE : Any = data.split('''\n''' ) req.close() return data def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = requests.get(snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = np.array(Image.open(BytesIO(response.content ) ) ) return img def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = url.split('''/''' )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(snake_case ) with open(snake_case 
, '''rb''' ) as stream: __SCREAMING_SNAKE_CASE : str = pkl.load(snake_case ) __SCREAMING_SNAKE_CASE : int = weights.pop('''model''' ) __SCREAMING_SNAKE_CASE : str = {} for k, v in model.items(): __SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(snake_case ) if "running_var" in k: __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([0] ) __SCREAMING_SNAKE_CASE : Optional[int] = k.replace('''running_var''' , '''num_batches_tracked''' ) __SCREAMING_SNAKE_CASE : Any = zero return new def a__ ( ): """simple docstring""" print(F'''{os.path.abspath(os.path.join(snake_case , os.pardir ) )}/demo.ipynb''' ) def a__ ( snake_case , snake_case="RGB" ): """simple docstring""" assert isinstance(snake_case , snake_case ) if os.path.isfile(snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = cva.imread(snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = get_image_from_url(snake_case ) assert img is not None, F'''could not connect to: {im}''' __SCREAMING_SNAKE_CASE : int = cva.cvtColor(snake_case , cva.COLOR_BGR2RGB ) if input_format == "RGB": __SCREAMING_SNAKE_CASE : Dict = img[:, :, ::-1] return img def a__ ( snake_case , snake_case=1 ): """simple docstring""" return (images[i : i + batch] for i in range(0 , len(snake_case ) , snake_case ))
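# A minimal sketch of the Config helper defined above, assuming the upstream
# constructor signature Config(dictionary, name="root", level=0). Nested dicts
# become nested Config objects that are readable attribute-style.
if __name__ == "__main__":
    demo_config = Config({"model": {"hidden_size": 768}})
    assert demo_config.model.hidden_size == 768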
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
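# Typical invocations of the copy checker above (the flag matches the argparse
# definition in this file):
#
#   python utils/check_copies.py                      # report copy inconsistencies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite offending files in place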
def prefix_function(input_string: str) -> list:
    """
    KMP prefix (failure) function: prefix_result[i] is the length of the longest
    proper prefix of input_string[: i + 1] that is also its suffix.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Length of the longest prefix that also occurs as a suffix of some prefix."""
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
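# A small worked example for prefix_function above: for "aabcdaabc" the failure
# values are [0, 1, 0, 0, 0, 1, 2, 3, 4], so the longest border seen in any
# prefix has length 4 ("aabc").
if __name__ == "__main__":
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4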
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}


if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer

else:
    import sys

    # Lazily populate the module so the heavy tokenizer import only happens on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
    "processing_layoutlmv2": ["LayoutLMv2Processor"],
    "tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ["LayoutLMv2TokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ["LayoutLMv2FeatureExtractor"]
    _import_structure["image_processing_layoutlmv2"] = ["LayoutLMv2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv2"] = [
        "LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv2ForQuestionAnswering",
        "LayoutLMv2ForSequenceClassification",
        "LayoutLMv2ForTokenClassification",
        "LayoutLMv2Layer",
        "LayoutLMv2Model",
        "LayoutLMv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
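# A minimal sketch instantiating the config above; the values shown match the
# defaults in the signature, so the call is equivalent to MraConfig().
if __name__ == "__main__":
    config = MraConfig(block_per_row=4, approx_mode="full")
    print(config.hidden_size, config.approx_mode)  # 768 full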
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) 
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , 
add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
def surface_area_dodecahedron(edge: float) -> float:
    """Surface area of a regular dodecahedron: 3 * sqrt(25 + 10*sqrt(5)) * edge**2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def volume_dodecahedron(edge: float) -> float:
    """Volume of a regular dodecahedron: (15 + 7*sqrt(5)) / 4 * edge**3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
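# Worked example for the formulas above with edge = 5:
#   surface area = 3 * sqrt(25 + 10 * sqrt(5)) * 5**2 ≈ 516.14
#   volume       = (15 + 7 * sqrt(5)) / 4 * 5**3     ≈ 957.89
if __name__ == "__main__":
    print(f"{surface_area_dodecahedron(5):.2f}")  # 516.14
    print(f"{volume_dodecahedron(5):.2f}")        # 957.89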
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
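# A minimal migration sketch for callers of the deprecated class above; the
# checkpoint id is only an example.
#
#   from transformers import MobileViTImageProcessor
#   image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")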
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    """Non-recursive segment tree supporting point updates and inclusive range queries."""

    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        # st[N:] holds the leaves; st[1:N] holds the internal nodes.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every (i, j) sub-range of test_array against a plain reduce()."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
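# A small worked query against the initial test_array above: query bounds are
# inclusive on both ends, so indices 2..5 cover (-2, 9, -3, 8) and the minimum is -3.
#
#   min_segment_tree.query(2, 5)  # -> -3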
import itertools from dataclasses import dataclass from typing import List, Optional import pyarrow as pa import pyarrow.parquet as pq import datasets from datasets.table import table_cast lowercase_ = datasets.utils.logging.get_logger(__name__) @dataclass class __UpperCamelCase ( datasets.BuilderConfig ): """simple docstring""" lowerCAmelCase_ = 1_00_00 lowerCAmelCase_ = None lowerCAmelCase_ = None class __UpperCamelCase ( datasets.ArrowBasedBuilder ): """simple docstring""" lowerCAmelCase_ = ParquetConfig def UpperCAmelCase__ ( self : Any ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" if not self.config.data_files: raise ValueError(F'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __SCREAMING_SNAKE_CASE : List[str] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(_A , (str, list, tuple) ): __SCREAMING_SNAKE_CASE : Tuple = data_files if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_A ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )] __SCREAMING_SNAKE_CASE : int = [] for split_name, files in data_files.items(): if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __SCREAMING_SNAKE_CASE : Optional[int] = [dl_manager.iter_files(_A ) for file in files] # Infer features if they are stored in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(_A ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : Dict = datasets.Features.from_arrow_schema(pq.read_schema(_A ) ) break splits.append(datasets.SplitGenerator(name=_A , gen_kwargs={'''files''': files} ) ) return splits def UpperCAmelCase__ ( self : str , _A : pa.Table ): """simple docstring""" if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __SCREAMING_SNAKE_CASE : str = table_cast(_A , self.info.features.arrow_schema ) return pa_table def UpperCAmelCase__ ( self : Tuple , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None if self.info.features is not None and self.config.columns is not None: if sorted(field.name for field in schema ) != sorted(self.config.columns ): raise ValueError( F'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''' ) for file_idx, file in enumerate(itertools.chain.from_iterable(_A ) ): with open(_A , '''rb''' ) as f: __SCREAMING_SNAKE_CASE : str = pq.ParquetFile(_A ) try: for batch_idx, record_batch in enumerate( parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ): __SCREAMING_SNAKE_CASE : Optional[Any] = pa.Table.from_batches([record_batch] ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield F'''{file_idx}_{batch_idx}''', self._cast_table(_A ) except ValueError as e: logger.error(F'''Failed to read file \'{file}\' with error {type(_A )}: {e}''' ) raise
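The batched read loop above leans on pyarrow's streaming reader; a small self-contained sketch of that read path (the file path and column names below are illustrative):

import pyarrow as pa
import pyarrow.parquet as pq

# write a tiny table, then stream it back in bounded-size record batches
table = pa.table({"id": list(range(10)), "value": [i * i for i in range(10)]})
pq.write_table(table, "/tmp/example.parquet")

parquet_file = pq.ParquetFile("/tmp/example.parquet")
for batch_idx, record_batch in enumerate(parquet_file.iter_batches(batch_size=4)):
    chunk = pa.Table.from_batches([record_batch])
    print(batch_idx, chunk.num_rows)  # prints 0 4, then 1 4, then 2 2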
import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''char''' lowerCAmelCase_ = '''bpe''' lowerCAmelCase_ = '''wp''' lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''image_processor''', '''char_tokenizer'''] lowerCAmelCase_ = '''ViTImageProcessor''' lowerCAmelCase_ = '''MgpstrTokenizer''' def __init__( self : int , _A : str=None , _A : List[str]=None , **_A : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , _A , ) __SCREAMING_SNAKE_CASE : List[Any] = kwargs.pop('''feature_extractor''' ) __SCREAMING_SNAKE_CASE : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer __SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained('''gpt2''' ) __SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained('''bert-base-uncased''' ) super().__init__(_A , _A ) def __call__( self : int , _A : str=None , _A : List[Any]=None , _A : List[Any]=None , **_A : str ): """simple docstring""" if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: __SCREAMING_SNAKE_CASE : Optional[int] = self.image_processor(_A , return_tensors=_A , **_A ) if text is not None: __SCREAMING_SNAKE_CASE : Any = self.char_tokenizer(_A , return_tensors=_A , **_A ) if text is None: return inputs elif images is None: return encodings else: __SCREAMING_SNAKE_CASE : str = encodings['''input_ids'''] return inputs def UpperCAmelCase__ ( self : List[Any] , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = sequences __SCREAMING_SNAKE_CASE : Tuple = char_preds.size(0 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self._decode_helper(_A , '''char''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self._decode_helper(_A , '''bpe''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = self._decode_helper(_A , '''wp''' ) __SCREAMING_SNAKE_CASE : Tuple = [] __SCREAMING_SNAKE_CASE : Optional[int] = [] for i in range(_A ): __SCREAMING_SNAKE_CASE : Any = [char_scores[i], bpe_scores[i], wp_scores[i]] __SCREAMING_SNAKE_CASE : str = [char_strs[i], bpe_strs[i], wp_strs[i]] __SCREAMING_SNAKE_CASE : Any = scores.index(max(_A ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) __SCREAMING_SNAKE_CASE : str = {} __SCREAMING_SNAKE_CASE : Optional[Any] = final_strs __SCREAMING_SNAKE_CASE : Union[str, Any] = final_scores __SCREAMING_SNAKE_CASE : int = char_strs __SCREAMING_SNAKE_CASE : List[Any] = bpe_strs __SCREAMING_SNAKE_CASE : List[str] = wp_strs return out def UpperCAmelCase__ ( self : Any , _A : Optional[int] , _A : List[Any] ): """simple docstring""" if format == 
DecodeType.CHARACTER: __SCREAMING_SNAKE_CASE : Any = self.char_decode __SCREAMING_SNAKE_CASE : Any = 1 __SCREAMING_SNAKE_CASE : Dict = '''[s]''' elif format == DecodeType.BPE: __SCREAMING_SNAKE_CASE : Dict = self.bpe_decode __SCREAMING_SNAKE_CASE : Dict = 2 __SCREAMING_SNAKE_CASE : Optional[int] = '''#''' elif format == DecodeType.WORDPIECE: __SCREAMING_SNAKE_CASE : str = self.wp_decode __SCREAMING_SNAKE_CASE : Any = 102 __SCREAMING_SNAKE_CASE : Optional[int] = '''[SEP]''' else: raise ValueError(F'''Format {format} is not supported.''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = [], [] __SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.size(0 ) __SCREAMING_SNAKE_CASE : Tuple = pred_logits.size(1 ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pred_logits.topk(1 , dim=-1 , largest=_A , sorted=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = preds_index.view(-1 , _A )[:, 1:] __SCREAMING_SNAKE_CASE : Dict = decoder(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = torch.nn.functional.softmax(_A , dim=2 ).max(dim=2 ) __SCREAMING_SNAKE_CASE : List[str] = preds_max_prob[:, 1:] for index in range(_A ): __SCREAMING_SNAKE_CASE : Any = preds_str[index].find(_A ) __SCREAMING_SNAKE_CASE : int = preds_str[index][:pred_eos] __SCREAMING_SNAKE_CASE : Optional[int] = preds_index[index].cpu().tolist() __SCREAMING_SNAKE_CASE : Union[str, Any] = pred_index.index(_A ) if eos_token in pred_index else -1 __SCREAMING_SNAKE_CASE : Optional[int] = preds_max_prob[index][: pred_eos_index + 1] __SCREAMING_SNAKE_CASE : Tuple = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(_A ) conf_scores.append(_A ) return dec_strs, conf_scores def UpperCAmelCase__ ( self : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(_A )] return decode_strs def UpperCAmelCase__ ( self : Any , _A : Optional[Any] ): """simple docstring""" return self.bpe_tokenizer.batch_decode(_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(_A )] return decode_strs
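The decode path above runs three decoders (character, BPE, WordPiece) and keeps, per sample, the string whose cumulative probability is highest. A toy sketch of that selection step with made-up strings and scores:

# each list holds one (decoded string, confidence) pair per sample, per decoder
char_out = [("hello", 0.91), ("wrld", 0.40)]
bpe_out = [("helo", 0.55), ("world", 0.88)]
wp_out = [("hella", 0.60), ("word", 0.70)]

final_strs, final_scores = [], []
for triple in zip(char_out, bpe_out, wp_out):
    strs, scores = zip(*triple)
    best = max(range(3), key=lambda k: scores[k])
    final_strs.append(strs[best])
    final_scores.append(scores[best])

assert final_strs == ["hello", "world"]
assert final_scores == [0.91, 0.88]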
from math import isclose, sqrt def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = point_y / 4 / point_x __SCREAMING_SNAKE_CASE : int = 2 * normal_gradient / (1 + normal_gradient * normal_gradient) __SCREAMING_SNAKE_CASE : Tuple = (1 - normal_gradient * normal_gradient) / ( 1 + normal_gradient * normal_gradient ) __SCREAMING_SNAKE_CASE : int = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient) # to find the next point, solve the simultaneous equations: # y^2 + 4x^2 = 100 # y - b = m * (x - a) # ==> A x^2 + B x + C = 0 __SCREAMING_SNAKE_CASE : int = outgoing_gradient**2 + 4 __SCREAMING_SNAKE_CASE : List[str] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x) __SCREAMING_SNAKE_CASE : Optional[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100 __SCREAMING_SNAKE_CASE : str = ( -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) __SCREAMING_SNAKE_CASE : int = ( -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term ) ) / (2 * quadratic_term) # two solutions, one of which is our input point __SCREAMING_SNAKE_CASE : Dict = x_minus if isclose(snake_case , snake_case ) else x_plus __SCREAMING_SNAKE_CASE : Dict = point_y + outgoing_gradient * (next_x - point_x) return next_x, next_y, outgoing_gradient def a__ ( snake_case = 1.4 , snake_case = -9.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : float = first_x_coord __SCREAMING_SNAKE_CASE : float = first_y_coord __SCREAMING_SNAKE_CASE : float = (10.1 - point_y) / (0.0 - point_x) while not (-0.01 <= point_x <= 0.01 and point_y > 0): __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = next_point(snake_case , snake_case , snake_case ) num_reflections += 1 return num_reflections if __name__ == "__main__": print(f'''{solution() = }''')
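A short check of the geometry the next_point function relies on: on the ellipse 4x^2 + y^2 = 100, implicit differentiation gives 8x + 2y*y' = 0, so the tangent slope is -4x/y and the normal slope is its negative reciprocal y/(4x), which is exactly the point_y / 4 / point_x expression above:

from math import isclose

x, y = 1.4, 9.6  # on the ellipse: 4 * 1.4**2 + 9.6**2 == 7.84 + 92.16 == 100
assert isclose(4 * x * x + y * y, 100.0)

tangent_gradient = -4 * x / y   # from differentiating 4x^2 + y^2 = 100
normal_gradient = y / (4 * x)   # same value as point_y / 4 / point_x
assert isclose(tangent_gradient * normal_gradient, -1.0)  # perpendicular slopes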
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
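A usage sketch for the lock classes above, written against the upstream filelock-style API that this vendored copy mirrors (FileLock(path, timeout=...), the is_locked property, and counted re-entrant acquisition); treat it as illustrative rather than directly runnable against the obfuscated listing:

lock = FileLock("/tmp/example.txt.lock", timeout=5)
with lock:
    with lock:  # nested acquire only bumps the internal counter
        assert lock.is_locked
    assert lock.is_locked  # inner exit decremented the counter, lock still held
assert not lock.is_locked  # outermost exit released the OS-level lock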
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] 
, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
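The three test methods above (PIL, numpy, and torch inputs) all assert the same shape contract; stated compactly with arbitrary tensor contents, a single processed image becomes a (1, C, H, W) batch and B images become (B, C, H, W):

import torch

num_channels, height, width = 3, 18, 18  # matches the tester's crop_size
single = torch.rand(num_channels, height, width).unsqueeze(0)
batch = torch.stack([torch.rand(num_channels, height, width) for _ in range(7)])
assert single.shape == (1, 3, 18, 18)
assert batch.shape == (7, 3, 18, 18)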
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowercase_ = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def a__ ( snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case ) def a__ ( snake_case ): """simple docstring""" from diffusers.utils.testing_utils import pytest_terminal_summary_main __SCREAMING_SNAKE_CASE : List[str] = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(snake_case , id=snake_case )
def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = [0 for i in range(len(snake_case ) )] # initialize interval's left pointer and right pointer __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = 0, 0 for i in range(1 , len(snake_case ) ): # case when current index is inside the interval if i <= right_pointer: __SCREAMING_SNAKE_CASE : List[Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) __SCREAMING_SNAKE_CASE : Dict = min_edge while go_next(snake_case , snake_case , snake_case ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = i, i + z_result[i] - 1 return z_result def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" return i + z_result[i] < len(snake_case ) and s[z_result[i]] == s[i + z_result[i]] def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string __SCREAMING_SNAKE_CASE : str = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(snake_case ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
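A usage sketch for the Z-function above: z[i] is the length of the longest common prefix of s and s[i:], so occurrences of a pattern in a text correspond to entries >= len(pattern) in the Z-array of pattern + text. The helper below re-derives the same algorithm with plain names:

def count_occurrences(pattern, text):
    s = pattern + text
    z = [0] * len(s)
    l = r = 0
    for i in range(1, len(s)):
        if i <= r:  # reuse the previously computed Z-box
            z[i] = min(r - i + 1, z[i - l])
        while i + z[i] < len(s) and s[z[i]] == s[i + z[i]]:
            z[i] += 1
        if i + z[i] - 1 > r:
            l, r = i, i + z[i] - 1
    return sum(1 for val in z if val >= len(pattern))

assert count_occurrences("aba", "abacaba") == 2  # matches at offsets 0 and 4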
import argparse import json from pathlib import Path import torch import torchaudio from datasets import load_dataset from huggingface_hub import hf_hub_download from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger(__name__) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ASTConfig() if "10-10" in model_name: pass elif "speech-commands" in model_name: __SCREAMING_SNAKE_CASE : Tuple = 128 elif "12-12" in model_name: __SCREAMING_SNAKE_CASE : List[str] = 12 __SCREAMING_SNAKE_CASE : str = 12 elif "14-14" in model_name: __SCREAMING_SNAKE_CASE : Tuple = 14 __SCREAMING_SNAKE_CASE : int = 14 elif "16-16" in model_name: __SCREAMING_SNAKE_CASE : List[str] = 16 __SCREAMING_SNAKE_CASE : Tuple = 16 else: raise ValueError('''Model not supported''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = '''huggingface/label-files''' if "speech-commands" in model_name: __SCREAMING_SNAKE_CASE : int = 35 __SCREAMING_SNAKE_CASE : Tuple = '''speech-commands-v2-id2label.json''' else: __SCREAMING_SNAKE_CASE : Union[str, Any] = 527 __SCREAMING_SNAKE_CASE : Optional[Any] = '''audioset-id2label.json''' __SCREAMING_SNAKE_CASE : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : int = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : Dict = idalabel __SCREAMING_SNAKE_CASE : List[str] = {v: k for k, v in idalabel.items()} return config def a__ ( snake_case ): """simple docstring""" if "module.v" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''module.v''' , '''audio_spectrogram_transformer''' ) if "cls_token" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "dist_token" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''dist_token''' , '''embeddings.distillation_token''' ) if "pos_embed" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) # transformer blocks if "blocks" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __SCREAMING_SNAKE_CASE : Tuple = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''mlp.fc2''' , '''output.dense''' ) # final layernorm if "audio_spectrogram_transformer.norm" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' ) # classifier head if "module.mlp_head.0" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' ) if "module.mlp_head.1" in name: __SCREAMING_SNAKE_CASE : int = name.replace('''module.mlp_head.1''' , '''classifier.dense''' ) return name def 
a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Optional[Any] = orig_state_dict.pop(snake_case ) if "qkv" in key: __SCREAMING_SNAKE_CASE : List[str] = key.split('''.''' ) __SCREAMING_SNAKE_CASE : Any = int(key_split[3] ) __SCREAMING_SNAKE_CASE : Tuple = config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : List[Any] = val[:dim, :] __SCREAMING_SNAKE_CASE : Union[str, Any] = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[:dim] __SCREAMING_SNAKE_CASE : List[str] = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : List[Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [ '''module.v.head.weight''', '''module.v.head.bias''', '''module.v.head_dist.weight''', '''module.v.head_dist.bias''', ] for k in ignore_keys: state_dict.pop(snake_case , snake_case ) @torch.no_grad() def a__ ( snake_case , snake_case , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = get_audio_spectrogram_transformer_config(snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = { '''ast-finetuned-audioset-10-10-0.4593''': ( '''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.450''': ( '''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448''': ( '''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1''' ), '''ast-finetuned-audioset-10-10-0.448-v2''': ( '''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1''' ), '''ast-finetuned-audioset-12-12-0.447''': ( '''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1''' ), '''ast-finetuned-audioset-14-14-0.443''': ( '''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1''' ), '''ast-finetuned-audioset-16-16-0.442''': ( '''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1''' ), '''ast-finetuned-speech-commands-v2''': ( '''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1''' ), } # load original state_dict __SCREAMING_SNAKE_CASE : int = model_name_to_url[model_name] __SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(snake_case , map_location='''cpu''' ) # remove some keys remove_keys(snake_case ) # rename some keys __SCREAMING_SNAKE_CASE : Dict = convert_state_dict(snake_case , snake_case ) # load 🤗 model __SCREAMING_SNAKE_CASE : Any = ASTForAudioClassification(snake_case ) model.eval() model.load_state_dict(snake_case ) # verify outputs on dummy input # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62 __SCREAMING_SNAKE_CASE : Tuple = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978 __SCREAMING_SNAKE_CASE : Tuple = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526 __SCREAMING_SNAKE_CASE : Dict = 1_024 if '''speech-commands''' not in model_name else 128 __SCREAMING_SNAKE_CASE : List[Any] = ASTFeatureExtractor(mean=snake_case , std=snake_case , max_length=snake_case ) if "speech-commands" in model_name: __SCREAMING_SNAKE_CASE : List[Any] = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' ) __SCREAMING_SNAKE_CASE : Dict = dataset[0]['''audio''']['''array'''] else: __SCREAMING_SNAKE_CASE : 
List[str] = hf_hub_download( repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = torchaudio.load(snake_case ) __SCREAMING_SNAKE_CASE : str = waveform.squeeze().numpy() __SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(snake_case , sampling_rate=16_000 , return_tensors='''pt''' ) # forward pass __SCREAMING_SNAKE_CASE : str = model(**snake_case ) __SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits if model_name == "ast-finetuned-audioset-10-10-0.4593": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([-0.8760, -7.0042, -8.6602] ) elif model_name == "ast-finetuned-audioset-10-10-0.450": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-1.1986, -7.0903, -8.2718] ) elif model_name == "ast-finetuned-audioset-10-10-0.448": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([-2.6128, -8.0080, -9.4344] ) elif model_name == "ast-finetuned-audioset-10-10-0.448-v2": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([-1.5080, -7.4534, -8.8917] ) elif model_name == "ast-finetuned-audioset-12-12-0.447": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-0.5050, -6.5833, -8.0843] ) elif model_name == "ast-finetuned-audioset-14-14-0.443": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([-0.3826, -7.0336, -8.2413] ) elif model_name == "ast-finetuned-audioset-16-16-0.442": __SCREAMING_SNAKE_CASE : str = torch.tensor([-1.2113, -6.9101, -8.3470] ) elif model_name == "ast-finetuned-speech-commands-v2": __SCREAMING_SNAKE_CASE : Any = torch.tensor([6.1589, -8.0566, -8.7984] ) else: raise ValueError('''Unknown model name''' ) if not torch.allclose(logits[0, :3] , snake_case , atol=1E-4 ): raise ValueError('''Logits don\'t match''' ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: Path(snake_case ).mkdir(exist_ok=snake_case ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' ) feature_extractor.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model and feature extractor to the hub...''' ) model.push_to_hub(F'''MIT/{model_name}''' ) feature_extractor.push_to_hub(F'''MIT/{model_name}''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""ast-finetuned-audioset-10-10-0.4593""", type=str, help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
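The key transformation in convert_state_dict above is slicing a fused attention in_proj weight of shape (3*dim, dim) into equal query/key/value blocks; a minimal check of that slicing:

import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)
# the three slices partition the fused matrix exactly
assert torch.equal(torch.cat([query, key, value]), qkv_weight)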
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowercase_ = {"""configuration_swin""": ["""SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """SwinConfig""", """SwinOnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """SwinForImageClassification""", """SwinForMaskedImageModeling""", """SwinModel""", """SwinPreTrainedModel""", """SwinBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFSwinForImageClassification""", """TFSwinForMaskedImageModeling""", """TFSwinModel""", """TFSwinPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
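A minimal sketch of the lazy-import mechanism the _import_structure above feeds into; this is an illustrative reimplementation, not transformers' actual _LazyModule. Attribute access triggers the real import, so importing the package itself stays cheap:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # only reached when normal lookup fails, i.e. on first access
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

lazy_math = LazyModule("lazy_math", {"math": ["sqrt"]})
print(lazy_math.sqrt(16.0))  # 4.0 -- `math` was only imported on this access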
from __future__ import annotations from typing import Any class __UpperCamelCase : """simple docstring""" def __init__( self : List[Any] , _A : int = 6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Node | None = None __SCREAMING_SNAKE_CASE : Node | None = None self.create_linked_list(_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = Node() __SCREAMING_SNAKE_CASE : List[Any] = current_node __SCREAMING_SNAKE_CASE : Optional[Any] = current_node __SCREAMING_SNAKE_CASE : Union[str, Any] = current_node for _ in range(1 , _A ): __SCREAMING_SNAKE_CASE : Dict = Node() __SCREAMING_SNAKE_CASE : List[Any] = current_node __SCREAMING_SNAKE_CASE : Optional[int] = previous_node __SCREAMING_SNAKE_CASE : Any = current_node __SCREAMING_SNAKE_CASE : List[Any] = self.front __SCREAMING_SNAKE_CASE : List[Any] = previous_node def UpperCAmelCase__ ( self : Any ): """simple docstring""" return ( self.front == self.rear and self.front is not None and self.front.data is None ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" self.check_can_perform_operation() return self.front.data if self.front else None def UpperCAmelCase__ ( self : str , _A : Any ): """simple docstring""" if self.rear is None: return self.check_is_full() if not self.is_empty(): __SCREAMING_SNAKE_CASE : List[str] = self.rear.next if self.rear: __SCREAMING_SNAKE_CASE : Any = data def UpperCAmelCase__ ( self : str ): """simple docstring""" self.check_can_perform_operation() if self.rear is None or self.front is None: return None if self.front == self.rear: __SCREAMING_SNAKE_CASE : Dict = self.front.data __SCREAMING_SNAKE_CASE : Optional[int] = None return data __SCREAMING_SNAKE_CASE : List[str] = self.front __SCREAMING_SNAKE_CASE : int = old_front.next __SCREAMING_SNAKE_CASE : Dict = old_front.data __SCREAMING_SNAKE_CASE : str = None return data def UpperCAmelCase__ ( self : Dict ): """simple docstring""" if self.is_empty(): raise Exception('''Empty Queue''' ) def UpperCAmelCase__ ( self : str ): """simple docstring""" if self.rear and self.rear.next == self.front: raise Exception('''Full Queue''' ) class __UpperCamelCase : """simple docstring""" def __init__( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any | None = None __SCREAMING_SNAKE_CASE : Node | None = None __SCREAMING_SNAKE_CASE : Node | None = None if __name__ == "__main__": import doctest doctest.testmod()
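A self-contained sketch of the fixed-capacity circular queue idea above, using a plain list instead of linked nodes for brevity (names are illustrative):

class RingQueue:
    def __init__(self, capacity=6):
        self.buf = [None] * capacity
        self.head = self.size = 0

    def enqueue(self, item):
        if self.size == len(self.buf):
            raise Exception("Full Queue")
        self.buf[(self.head + self.size) % len(self.buf)] = item
        self.size += 1

    def dequeue(self):
        if self.size == 0:
            raise Exception("Empty Queue")
        item, self.buf[self.head] = self.buf[self.head], None
        self.head = (self.head + 1) % len(self.buf)
        self.size -= 1
        return item

q = RingQueue(2)
q.enqueue(1)
q.enqueue(2)
assert q.dequeue() == 1
q.enqueue(3)  # wraps around into the freed slot
assert [q.dequeue(), q.dequeue()] == [2, 3]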
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
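# Hedged usage sketch (separate from the conversion script above): loading a converted checkpoint back
# through the public API. The hub id "microsoft/xclip-base-patch32" and the random 8-frame dummy video
# are illustrative assumptions, not something the conversion script guarantees.
import numpy as np
import torch
from transformers import XCLIPModel, XCLIPProcessor

xclip_processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
xclip_model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
dummy_video = list(np.random.randint(0, 255, (8, 224, 224, 3), dtype=np.uint8))  # 8 RGB frames
xclip_inputs = xclip_processor(
    text=["playing sports", "eating spaghetti"], videos=dummy_video, return_tensors="pt", padding=True
)
with torch.no_grad():
    video_probs = xclip_model(**xclip_inputs).logits_per_video.softmax(dim=1)
print(video_probs)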
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = checkpoint __SCREAMING_SNAKE_CASE : Optional[int] = {} __SCREAMING_SNAKE_CASE : Union[str, Any] = vae_state_dict['''encoder.conv_in.weight'''] __SCREAMING_SNAKE_CASE : List[Any] = vae_state_dict['''encoder.conv_in.bias'''] __SCREAMING_SNAKE_CASE : List[str] = vae_state_dict['''encoder.conv_out.weight'''] __SCREAMING_SNAKE_CASE : List[str] = vae_state_dict['''encoder.conv_out.bias'''] __SCREAMING_SNAKE_CASE : int = vae_state_dict['''encoder.norm_out.weight'''] __SCREAMING_SNAKE_CASE : str = vae_state_dict['''encoder.norm_out.bias'''] __SCREAMING_SNAKE_CASE : List[str] = vae_state_dict['''decoder.conv_in.weight'''] __SCREAMING_SNAKE_CASE : List[str] = vae_state_dict['''decoder.conv_in.bias'''] __SCREAMING_SNAKE_CASE : Optional[int] = vae_state_dict['''decoder.conv_out.weight'''] __SCREAMING_SNAKE_CASE : Any = vae_state_dict['''decoder.conv_out.bias'''] __SCREAMING_SNAKE_CASE : List[Any] = vae_state_dict['''decoder.norm_out.weight'''] __SCREAMING_SNAKE_CASE : Tuple = vae_state_dict['''decoder.norm_out.bias'''] __SCREAMING_SNAKE_CASE : Optional[Any] = vae_state_dict['''quant_conv.weight'''] __SCREAMING_SNAKE_CASE : List[str] = vae_state_dict['''quant_conv.bias'''] __SCREAMING_SNAKE_CASE : Tuple = vae_state_dict['''post_quant_conv.weight'''] __SCREAMING_SNAKE_CASE : Dict = vae_state_dict['''post_quant_conv.bias'''] # Retrieves the keys for the encoder down blocks only __SCREAMING_SNAKE_CASE : Union[str, Any] = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { layer_id: [key for key in vae_state_dict if F'''down.{layer_id}''' in key] for layer_id in range(snake_case ) } # Retrieves the keys for the decoder up blocks only __SCREAMING_SNAKE_CASE : str = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { layer_id: [key for key in vae_state_dict if F'''up.{layer_id}''' in key] for layer_id in range(snake_case ) } for i in range(snake_case ): __SCREAMING_SNAKE_CASE : Optional[int] = [key for key in down_blocks[i] if F'''down.{i}''' in key and F'''down.{i}.downsample''' not in key] if F'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: __SCREAMING_SNAKE_CASE : Union[str, Any] = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.weight''' ) __SCREAMING_SNAKE_CASE : Any = vae_state_dict.pop( F'''encoder.down.{i}.downsample.conv.bias''' ) __SCREAMING_SNAKE_CASE : Any = renew_vae_resnet_paths(snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'''old''': F'''down.{i}.block''', '''new''': F'''down_blocks.{i}.resnets'''} assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case ) __SCREAMING_SNAKE_CASE : Dict = [key for key in vae_state_dict if '''encoder.mid.block''' in key] __SCREAMING_SNAKE_CASE : List[str] = 2 for i in range(1 , num_mid_res_blocks + 1 ): __SCREAMING_SNAKE_CASE : str = [key for key in mid_resnets if F'''encoder.mid.block_{i}''' in key] __SCREAMING_SNAKE_CASE : List[Any] = renew_vae_resnet_paths(snake_case 
) __SCREAMING_SNAKE_CASE : List[str] = {'''old''': F'''mid.block_{i}''', '''new''': F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = [key for key in vae_state_dict if '''encoder.mid.attn''' in key] __SCREAMING_SNAKE_CASE : str = renew_vae_attention_paths(snake_case ) __SCREAMING_SNAKE_CASE : int = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case ) conv_attn_to_linear(snake_case ) for i in range(snake_case ): __SCREAMING_SNAKE_CASE : Any = num_up_blocks - 1 - i __SCREAMING_SNAKE_CASE : Union[str, Any] = [ key for key in up_blocks[block_id] if F'''up.{block_id}''' in key and F'''up.{block_id}.upsample''' not in key ] if F'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: __SCREAMING_SNAKE_CASE : Optional[Any] = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.weight''' ] __SCREAMING_SNAKE_CASE : Dict = vae_state_dict[ F'''decoder.up.{block_id}.upsample.conv.bias''' ] __SCREAMING_SNAKE_CASE : Tuple = renew_vae_resnet_paths(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = {'''old''': F'''up.{block_id}.block''', '''new''': F'''up_blocks.{i}.resnets'''} assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = [key for key in vae_state_dict if '''decoder.mid.block''' in key] __SCREAMING_SNAKE_CASE : List[Any] = 2 for i in range(1 , num_mid_res_blocks + 1 ): __SCREAMING_SNAKE_CASE : Tuple = [key for key in mid_resnets if F'''decoder.mid.block_{i}''' in key] __SCREAMING_SNAKE_CASE : Optional[Any] = renew_vae_resnet_paths(snake_case ) __SCREAMING_SNAKE_CASE : Tuple = {'''old''': F'''mid.block_{i}''', '''new''': F'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = [key for key in vae_state_dict if '''decoder.mid.attn''' in key] __SCREAMING_SNAKE_CASE : Optional[Any] = renew_vae_attention_paths(snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''} assign_to_checkpoint(snake_case , snake_case , snake_case , additional_replacements=[meta_path] , config=snake_case ) conv_attn_to_linear(snake_case ) return new_checkpoint def a__ ( snake_case , snake_case , ): """simple docstring""" # Only support V1 __SCREAMING_SNAKE_CASE : List[Any] = requests.get( ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' ) __SCREAMING_SNAKE_CASE : Dict = io.BytesIO(r.content ) __SCREAMING_SNAKE_CASE : Tuple = OmegaConf.load(snake_case ) __SCREAMING_SNAKE_CASE : Dict = 512 __SCREAMING_SNAKE_CASE : Optional[int] = '''cuda''' if torch.cuda.is_available() else '''cpu''' if checkpoint_path.endswith('''safetensors''' ): from safetensors import safe_open __SCREAMING_SNAKE_CASE : Optional[Any] = {} with safe_open(snake_case , framework='''pt''' , device='''cpu''' ) as f: for key in f.keys(): __SCREAMING_SNAKE_CASE : Dict = f.get_tensor(snake_case ) else: __SCREAMING_SNAKE_CASE : Optional[Any] = torch.load(snake_case , map_location=snake_case )['''state_dict'''] # Convert the VAE model. 
__SCREAMING_SNAKE_CASE : int = create_vae_diffusers_config(snake_case , image_size=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = custom_convert_ldm_vae_checkpoint(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = AutoencoderKL(**snake_case ) vae.load_state_dict(snake_case ) vae.save_pretrained(snake_case ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--vae_pt_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the VAE.pt to convert.""") lowercase_ = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
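# Hedged follow-up sketch: a converted VAE is typically consumed by swapping it into an existing Stable
# Diffusion pipeline. The "runwayml/stable-diffusion-v1-5" id and the local "./converted_vae" dump path
# are illustrative assumptions.
from diffusers import AutoencoderKL, StableDiffusionPipeline

converted_vae = AutoencoderKL.from_pretrained("./converted_vae")  # the --dump_path used above
sd_pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", vae=converted_vae)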
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first n lines of each file in src_dir to a file of the same name under dest_dir."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
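# Hedged usage note: fire.Fire exposes minify as a command-line tool, so assuming the file is saved as
# minify.py, the call below would truncate every file in a dataset directory to its first 100 lines.
# The directory names are illustrative assumptions.
#
#     python minify.py wmt_en_ro/ wmt_en_ro_100/ 100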
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''ylacombe/bark-small''' __SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp() __SCREAMING_SNAKE_CASE : Union[str, Any] = '''en_speaker_1''' __SCREAMING_SNAKE_CASE : Optional[int] = '''This is a test string''' __SCREAMING_SNAKE_CASE : Tuple = '''speaker_embeddings_path.json''' __SCREAMING_SNAKE_CASE : str = '''speaker_embeddings''' def UpperCAmelCase__ ( self : Optional[Any] , **_A : int ): """simple docstring""" return AutoTokenizer.from_pretrained(self.checkpoint , **_A ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" shutil.rmtree(self.tmpdirname ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.get_tokenizer() __SCREAMING_SNAKE_CASE : int = BarkProcessor(tokenizer=_A ) processor.save_pretrained(self.tmpdirname ) __SCREAMING_SNAKE_CASE : Dict = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) __SCREAMING_SNAKE_CASE : Optional[int] = 35 __SCREAMING_SNAKE_CASE : Optional[Any] = 2 __SCREAMING_SNAKE_CASE : Optional[Any] = 8 __SCREAMING_SNAKE_CASE : Tuple = { '''semantic_prompt''': np.ones(_A ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset __SCREAMING_SNAKE_CASE : Any = processor(text=self.input_string , voice_preset=_A ) __SCREAMING_SNAKE_CASE : str = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_A , np.array([] ) ).tolist() ) # test loading voice preset from npz file __SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(_A , **_A ) __SCREAMING_SNAKE_CASE : str = processor(text=self.input_string , voice_preset=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_A , np.array([] ) ).tolist() ) # test loading voice preset from the hub __SCREAMING_SNAKE_CASE : int = processor(text=self.input_string , 
voice_preset=self.voice_preset ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[int] = BarkProcessor(tokenizer=_A ) __SCREAMING_SNAKE_CASE : Dict = processor(text=self.input_string ) __SCREAMING_SNAKE_CASE : str = tokenizer( self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=_A , return_attention_mask=_A , return_token_type_ids=_A , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
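# Hedged usage sketch mirroring what the processor tests above exercise: pairing BarkProcessor with
# BarkModel for text-to-audio generation. The "suno/bark-small" id and the voice preset name are
# illustrative assumptions.
from transformers import AutoProcessor, BarkModel

bark_processor = AutoProcessor.from_pretrained("suno/bark-small")
bark_model = BarkModel.from_pretrained("suno/bark-small")
bark_inputs = bark_processor("This is a test string", voice_preset="v2/en_speaker_1")
audio_array = bark_model.generate(**bark_inputs)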
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
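# Hedged usage sketch: in practice a DisjunctiveConstraint is passed to generate() for constrained beam
# search rather than stepped manually as in the tests above. The "gpt2" checkpoint and the candidate
# phrases are illustrative assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer, DisjunctiveConstraint

lm_tokenizer = AutoTokenizer.from_pretrained("gpt2")
lm_model = AutoModelForCausalLM.from_pretrained("gpt2")
phrase_options = [
    lm_tokenizer(" screamed", add_special_tokens=False).input_ids,
    lm_tokenizer(" shouted", add_special_tokens=False).input_ids,
]
constraint = DisjunctiveConstraint(phrase_options)  # force one of the two phrases to appear
prompt_ids = lm_tokenizer("The baby", return_tensors="pt").input_ids
generated = lm_model.generate(prompt_ids, constraints=[constraint], num_beams=4, max_new_tokens=20)
print(lm_tokenizer.decode(generated[0], skip_special_tokens=True))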
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''data2vec-vision''' def __init__( self : Optional[int] , _A : List[Any]=768 , _A : Any=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.0 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Any=1e-12 , _A : Optional[Any]=224 , _A : Union[str, Any]=16 , _A : Tuple=3 , _A : List[Any]=False , _A : List[str]=False , _A : Dict=False , _A : Dict=False , _A : Any=0.1 , _A : List[str]=0.1 , _A : Dict=True , _A : Dict=[3, 5, 7, 11] , _A : Union[str, Any]=[1, 2, 3, 6] , _A : Optional[Any]=True , _A : Any=0.4 , _A : List[str]=256 , _A : Any=1 , _A : Any=False , _A : Union[str, Any]=255 , **_A : Tuple , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : List[str] = use_mask_token __SCREAMING_SNAKE_CASE : List[Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE : Dict = use_relative_position_bias __SCREAMING_SNAKE_CASE : str = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_scale_init_value __SCREAMING_SNAKE_CASE : str = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : str = out_indices __SCREAMING_SNAKE_CASE : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : Tuple = use_auxiliary_head __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels __SCREAMING_SNAKE_CASE : List[Any] = auxiliary_num_convs __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_concat_input __SCREAMING_SNAKE_CASE : Any = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4
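# Hedged sketch of how this configuration class is typically consumed: instantiating the matching model
# from a freshly built config. The pairing with Data2VecVisionModel and the parameter values are
# illustrative assumptions rather than something this file declares.
from transformers import Data2VecVisionConfig, Data2VecVisionModel

d2v_config = Data2VecVisionConfig(image_size=224, patch_size=16)
d2v_model = Data2VecVisionModel(d2v_config)  # randomly initialised weights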
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
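# Hedged usage note: a typical invocation of this masked image modeling (SimMIM-style) training script,
# with the dataset name and hyperparameters below as illustrative assumptions only.
#
#     python run_mim.py \
#         --model_type vit \
#         --dataset_name cifar10 \
#         --output_dir ./simmim-vit \
#         --do_train \
#         --do_eval \
#         --per_device_train_batch_size 8 \
#         --num_train_epochs 1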
from dataclasses import dataclass, field from typing import Optional from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( metadata={'''help''': '''The output directory where the model will be written.'''} , ) lowerCAmelCase_ = field( metadata={ '''help''': ( '''The encoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train an encoder model from scratch.''' ) } , ) lowerCAmelCase_ = field( metadata={ '''help''': ( '''The decoder model checkpoint for weights initialization.''' '''Don\'t set if you want to train a decoder model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained encoder config name or path if not the same as encoder_model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained decoder config name or path if not the same as decoder_model_name'''} ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = HfArgumentParser((ModelArguments,) ) ((__SCREAMING_SNAKE_CASE), ) : List[str] = parser.parse_args_into_dataclasses() # Load pretrained model and tokenizer # Use explicit specified encoder config if model_args.encoder_config_name: __SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(model_args.encoder_config_name ) # Use pretrained encoder model's config else: __SCREAMING_SNAKE_CASE : Tuple = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path ) # Use explicit specified decoder config if model_args.decoder_config_name: __SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(model_args.decoder_config_name ) # Use pretrained decoder model's config else: __SCREAMING_SNAKE_CASE : Union[str, Any] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path ) # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed __SCREAMING_SNAKE_CASE : Optional[Any] = True __SCREAMING_SNAKE_CASE : int = True __SCREAMING_SNAKE_CASE : Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained( encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=snake_case , decoder_config=snake_case , ) # GPT2 only has bos/eos tokens but not decoder_start/pad tokens __SCREAMING_SNAKE_CASE : str = decoder_config.decoder_start_token_id __SCREAMING_SNAKE_CASE : List[Any] = decoder_config.pad_token_id if decoder_start_token_id is None: __SCREAMING_SNAKE_CASE : Optional[Any] = decoder_config.bos_token_id if pad_token_id is None: __SCREAMING_SNAKE_CASE : str = decoder_config.eos_token_id # This is necessary to make Flax's generate() work __SCREAMING_SNAKE_CASE : int = decoder_config.eos_token_id __SCREAMING_SNAKE_CASE : Tuple = decoder_start_token_id __SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id __SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path ) __SCREAMING_SNAKE_CASE : Any = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id ) model.save_pretrained(model_args.output_dir ) image_processor.save_pretrained(model_args.output_dir ) tokenizer.save_pretrained(model_args.output_dir ) if __name__ == "__main__": main()
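# Hedged usage note: this script is typically run with a vision encoder and a GPT-2 style decoder; the
# script name and checkpoints below are illustrative assumptions.
#
#     python create_model_from_encoder_decoder_models.py \
#         --output_dir ./vit-gpt2 \
#         --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#         --decoder_model_name_or_path gpt2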
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""", """funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""", """funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""", """funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""", """funnel-transformer/intermediate""": ( """https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json""" ), """funnel-transformer/intermediate-base""": ( """https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json""" ), """funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""", """funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""", """funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""", """funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""", } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''funnel''' lowerCAmelCase_ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', } def __init__( self : Dict , _A : Any=3_0522 , _A : Tuple=[4, 4, 4] , _A : Optional[Any]=None , _A : int=2 , _A : Any=768 , _A : str=12 , _A : Any=64 , _A : Union[str, Any]=3072 , _A : Any="gelu_new" , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[Any]=0.0 , _A : int=0.1 , _A : Optional[int]=None , _A : Tuple=1e-9 , _A : Optional[Any]="mean" , _A : Dict="relative_shift" , _A : int=True , _A : List[str]=True , _A : List[Any]=True , **_A : List[Any] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size __SCREAMING_SNAKE_CASE : Dict = block_sizes __SCREAMING_SNAKE_CASE : Optional[Any] = [1] * len(_A ) if block_repeats is None else block_repeats assert len(_A ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_decoder_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = d_model __SCREAMING_SNAKE_CASE : int = n_head __SCREAMING_SNAKE_CASE : int = d_head __SCREAMING_SNAKE_CASE : Dict = d_inner __SCREAMING_SNAKE_CASE : Any = hidden_act __SCREAMING_SNAKE_CASE : Any = hidden_dropout __SCREAMING_SNAKE_CASE : List[str] = attention_dropout __SCREAMING_SNAKE_CASE : Union[str, Any] = activation_dropout __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = initializer_std __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps assert pooling_type in [ "mean", "max", ], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' __SCREAMING_SNAKE_CASE : Optional[Any] = pooling_type assert attention_type in [ "relative_shift", "factorized", ], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' __SCREAMING_SNAKE_CASE : int = attention_type __SCREAMING_SNAKE_CASE : Dict = separate_cls __SCREAMING_SNAKE_CASE : Optional[int] = truncate_seq __SCREAMING_SNAKE_CASE : Any = pool_q_only super().__init__(**_A ) @property def UpperCAmelCase__ ( self : Any ): """simple docstring""" return sum(self.block_sizes ) @num_hidden_layers.setter def UpperCAmelCase__ ( self : Dict , _A : List[Any] ): """simple docstring""" raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' ) @property def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" return len(self.block_sizes ) @num_blocks.setter def UpperCAmelCase__ ( self : Optional[int] , _A : Optional[Any] ): """simple docstring""" raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
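# Hedged sketch of the derived properties defined above: num_hidden_layers is the sum of block_sizes and
# num_blocks is their count; neither can be set directly. The concrete values are illustrative.
from transformers import FunnelConfig

funnel_config = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 1, 1])
assert funnel_config.num_hidden_layers == 12  # sum(block_sizes)
assert funnel_config.num_blocks == 3  # len(block_sizes)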
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask __SCREAMING_SNAKE_CASE : str = use_token_type_ids __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : int = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , 
hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.num_choices __SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), 
(__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase_ = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A ) __SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace( _A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A ) loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Any = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertConfig, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) class __UpperCamelCase : """simple docstring""" def __init__( self : List[str] , _A : Any , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = parent __SCREAMING_SNAKE_CASE : Union[str, Any] = 13 __SCREAMING_SNAKE_CASE : List[str] = 7 __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : List[str] = True __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : Optional[int] = True __SCREAMING_SNAKE_CASE : List[str] = False __SCREAMING_SNAKE_CASE : str = False __SCREAMING_SNAKE_CASE : List[Any] = False __SCREAMING_SNAKE_CASE : Dict = 2 __SCREAMING_SNAKE_CASE : List[Any] = 99 __SCREAMING_SNAKE_CASE : Tuple = 0 __SCREAMING_SNAKE_CASE : Union[str, Any] = 32 __SCREAMING_SNAKE_CASE : Optional[int] = 2 __SCREAMING_SNAKE_CASE : List[str] = 4 __SCREAMING_SNAKE_CASE : List[str] = 0.1 __SCREAMING_SNAKE_CASE : List[Any] = 0.1 __SCREAMING_SNAKE_CASE : str = 512 __SCREAMING_SNAKE_CASE : Any = 16 __SCREAMING_SNAKE_CASE : Tuple = 2 __SCREAMING_SNAKE_CASE : str = 0.02 __SCREAMING_SNAKE_CASE : Optional[int] = 3 __SCREAMING_SNAKE_CASE : Optional[int] = 4 __SCREAMING_SNAKE_CASE : Optional[int] = '''last''' __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : int = None __SCREAMING_SNAKE_CASE : Tuple = 0 def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa ) __SCREAMING_SNAKE_CASE : int = None if self.use_input_lengths: __SCREAMING_SNAKE_CASE : Tuple = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __SCREAMING_SNAKE_CASE : Tuple = None if self.use_token_type_ids: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __SCREAMING_SNAKE_CASE : List[Any] = None __SCREAMING_SNAKE_CASE : Tuple = None __SCREAMING_SNAKE_CASE : Any = None if self.use_labels: __SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa ) __SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : List[Any] = FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , 
causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase__ ( self : Dict , _A : int , _A : Dict , _A : List[Any] , _A : Dict , _A : Dict , _A : List[Any] , _A : str , _A : Tuple , _A : List[Any] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = TFFlaubertModel(config=_A ) __SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) __SCREAMING_SNAKE_CASE : int = [input_ids, input_mask] __SCREAMING_SNAKE_CASE : Optional[int] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : List[str] , _A : Optional[int] , _A : List[Any] , _A : Dict , _A : int , _A : Tuple , _A : Optional[int] , _A : Optional[int] , _A : List[str] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = TFFlaubertWithLMHeadModel(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids} __SCREAMING_SNAKE_CASE : Any = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : List[str] , _A : Any , _A : Any , _A : List[Any] , _A : List[str] , _A : Tuple , _A : Any , _A : Optional[Any] , _A : Optional[int] , _A : str , ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = TFFlaubertForQuestionAnsweringSimple(_A ) __SCREAMING_SNAKE_CASE : List[str] = {'''input_ids''': input_ids, '''lengths''': input_lengths} __SCREAMING_SNAKE_CASE : str = model(_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Tuple , _A : Any , _A : Any , _A : Optional[Any] , _A : Any , _A : str , _A : Optional[Any] , _A : Optional[int] , _A : Optional[Any] , _A : List[str] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = TFFlaubertForSequenceClassification(_A ) __SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths} __SCREAMING_SNAKE_CASE : Optional[Any] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCAmelCase__ ( self : Dict , _A : Dict , _A : Any , _A : Tuple , _A : Optional[Any] , _A : Optional[int] , _A : List[str] , _A : int , _A : Dict , _A : Optional[int] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.num_labels __SCREAMING_SNAKE_CASE : int = TFFlaubertForTokenClassification(config=_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids} __SCREAMING_SNAKE_CASE : Optional[int] = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Any , _A : str , _A : List[str] , _A : Union[str, Any] , _A : Any , _A : Tuple , _A : str , _A : List[Any] , _A : Optional[Any] , _A : Dict , ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.num_choices 
__SCREAMING_SNAKE_CASE : Tuple = TFFlaubertForMultipleChoice(config=_A ) __SCREAMING_SNAKE_CASE : List[str] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __SCREAMING_SNAKE_CASE : Dict = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) ) __SCREAMING_SNAKE_CASE : Dict = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, '''token_type_ids''': multiple_choice_token_type_ids, } __SCREAMING_SNAKE_CASE : int = model(_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.prepare_config_and_inputs() ( ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ) : Optional[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Tuple = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''langs''': token_type_ids, '''lengths''': input_lengths, } return config, inputs_dict @require_tf class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( TFFlaubertModel, TFFlaubertWithLMHeadModel, TFFlaubertForSequenceClassification, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForTokenClassification, TFFlaubertForMultipleChoice, ) if is_tf_available() else () ) lowerCAmelCase_ = ( (TFFlaubertWithLMHeadModel,) if is_tf_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable lowerCAmelCase_ = ( { '''feature-extraction''': TFFlaubertModel, '''fill-mask''': TFFlaubertWithLMHeadModel, '''question-answering''': TFFlaubertForQuestionAnsweringSimple, '''text-classification''': TFFlaubertForSequenceClassification, '''token-classification''': TFFlaubertForTokenClassification, '''zero-shot''': TFFlaubertForSequenceClassification, } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False def UpperCAmelCase__ ( self : List[str] , _A : str , _A : Any , _A : List[Any] , _A : str , _A : str ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('''Fast''' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = TFFlaubertModelTester(self ) __SCREAMING_SNAKE_CASE : Dict = ConfigTester(self , config_class=_A , emb_dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*_A ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*_A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : int = TFFlaubertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @require_tf @require_sentencepiece @require_tokenizers class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = TFFlaubertModel.from_pretrained('''jplu/tf-flaubert-small-cased''' ) __SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor( [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !" __SCREAMING_SNAKE_CASE : List[str] = model(_A )[0] __SCREAMING_SNAKE_CASE : Tuple = tf.TensorShape((1, 8, 512) ) self.assertEqual(output.shape , _A ) # compare the actual values for a slice. __SCREAMING_SNAKE_CASE : Optional[int] = tf.convert_to_tensor( [ [ [-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18], [-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99], [-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52], ] ] , dtype=tf.floataa , ) self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
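# A minimal sketch of how `lengths` stands in for a padding mask in the tests
# above: XLM-style models such as Flaubert expand per-example lengths into a
# boolean mask internally. The helper name is illustrative, not part of the suite.
def _lengths_to_mask_sketch(lengths, max_len):
    # e.g. lengths=[5, 3], max_len=7 -> [[T,T,T,T,T,F,F], [T,T,T,F,F,F,F]]
    return tf.sequence_mask(lengths, maxlen=max_len)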
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
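# A minimal usage sketch, assuming the classes above run with their masked
# assignments restored: `FileLock` resolves to WindowsFileLock, UnixFileLock,
# or SoftFileLock depending on which locking backend imported successfully.
# The nested `with` only bumps the lock counter, so the OS-level lock is
# dropped once, at the outermost exit. The path below is illustrative.
#
# lock = FileLock("/tmp/shared_resource.txt.lock", timeout=5)
# with lock:                  # acquires, or raises Timeout after 5 seconds
#     with lock:              # re-entrant: increments the counter only
#         assert lock.is_locked
#     assert lock.is_locked   # still held after the inner exit
# assert not lock.is_locked   # released at the outermost exit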
from __future__ import annotations from statistics import mean def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = [0] * no_of_processes __SCREAMING_SNAKE_CASE : List[str] = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(snake_case ): __SCREAMING_SNAKE_CASE : Dict = burst_time[i] __SCREAMING_SNAKE_CASE : list[int] = [] __SCREAMING_SNAKE_CASE : List[Any] = 0 __SCREAMING_SNAKE_CASE : Optional[Any] = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: __SCREAMING_SNAKE_CASE : Dict = [] __SCREAMING_SNAKE_CASE : Optional[Any] = -1 for i in range(snake_case ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(snake_case ) if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: __SCREAMING_SNAKE_CASE : Union[str, Any] = i total_time += burst_time[target_process] completed += 1 __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 __SCREAMING_SNAKE_CASE : Union[str, Any] = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def a__ ( snake_case , snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [0] * no_of_processes for i in range(snake_case ): __SCREAMING_SNAKE_CASE : Union[str, Any] = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") lowercase_ = 4 lowercase_ = [2, 5, 3, 7] lowercase_ = [0, 0, 0, 0] lowercase_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes) lowercase_ = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( f'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t''' f'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}''' ) print(f'''\nAverage waiting time = {mean(waiting_time):.5f}''') print(f'''Average turnaround time = {mean(turn_around_time):.5f}''')
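# A worked trace of TEST CASE 01 above (burst = [2, 5, 3, 7], all arrivals 0).
# Once selected, a process runs to completion (`total_time += burst_time[...]`),
# so with identical arrival times the loop behaves like non-preemptive
# shortest-job-first:
#   t=0  -> run P1 (burst 2): waiting[P1] = 2 - 0 - 2  = 0
#   t=2  -> run P3 (burst 3): waiting[P3] = 5 - 0 - 3  = 2
#   t=5  -> run P2 (burst 5): waiting[P2] = 10 - 0 - 5 = 5
#   t=10 -> run P4 (burst 7): waiting[P4] = 17 - 0 - 7 = 10
# Mean waiting time 17/4 = 4.25 and mean turnaround 34/4 = 8.50, which is what
# the demo under __main__ should print.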
import html from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin from ...utils import is_bsa_available, logging, requires_backends if is_bsa_available(): import bsa from bsa import BeautifulSoup lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[Any] , **_A : Dict ): """simple docstring""" requires_backends(self , ['''bs4'''] ) super().__init__(**_A ) def UpperCAmelCase__ ( self : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = [] __SCREAMING_SNAKE_CASE : Any = [] __SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent for parent in child.parents: # type: bs4.element.Tag __SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A ) xpath_tags.append(child.name ) xpath_subscripts.append( 0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) ) __SCREAMING_SNAKE_CASE : Any = parent xpath_tags.reverse() xpath_subscripts.reverse() return xpath_tags, xpath_subscripts def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' ) __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = [] for element in html_code.descendants: if type(_A ) == bsa.element.NavigableString: if type(element.parent ) != bsa.element.Tag: continue __SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip() if not text_in_this_tag: continue all_doc_strings.append(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A ) stringaxtag_seq.append(_A ) stringaxsubs_seq.append(_A ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xtags does not correspond''' ) if len(_A ) != len(_A ): raise ValueError('''Number of doc strings and xsubs does not correspond''' ) return all_doc_strings, stringaxtag_seq, stringaxsubs_seq def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' for tagname, subs in zip(_A , _A ): xpath += F'''/{tagname}''' if subs != 0: xpath += F'''[{subs}]''' return xpath def __call__( self : Optional[int] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = False # Check that strings has a valid type if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE : Any = True elif isinstance(_A , (list, tuple) ): if len(_A ) == 0 or isinstance(html_strings[0] , _A ): __SCREAMING_SNAKE_CASE : List[Any] = True if not valid_strings: raise ValueError( '''HTML strings must of type `str`, `List[str]` (batch of examples), ''' F'''but is of type {type(_A )}.''' ) __SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) ) if not is_batched: __SCREAMING_SNAKE_CASE : Dict = [html_strings] # Get nodes + xpaths __SCREAMING_SNAKE_CASE : str = [] __SCREAMING_SNAKE_CASE : Tuple = [] for html_string in html_strings: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A ) nodes.append(_A ) __SCREAMING_SNAKE_CASE : Dict = [] for node, tag_list, sub_list in zip(_A , _A , _A ): __SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A ) xpath_strings.append(_A ) xpaths.append(_A ) # return as Dict __SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths} 
encoded_inputs : BatchFeature = BatchFeature(data=_A , tensor_type=_A ) return encoded_inputs
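# A standalone sketch of how the xpath strings above are assembled: a
# subscript of 0 means the tag is the only child with that name, so no
# bracket index is emitted. The helper name is illustrative.
def _construct_xpath_sketch(xpath_tags, xpath_subscripts):
    xpath = ""
    for tagname, subs in zip(xpath_tags, xpath_subscripts):
        xpath += f"/{tagname}"
        if subs != 0:
            xpath += f"[{subs}]"
    return xpath

# e.g. the second <p> under <body>:
assert _construct_xpath_sketch(["html", "body", "p"], [0, 0, 2]) == "/html/body/p[2]"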
import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = StableDiffusionXLImgaImgPipeline lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'''latents'''} lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase__ ( self : Any ): """simple docstring""" torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=_A , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) __SCREAMING_SNAKE_CASE : List[str] = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=32 , ) __SCREAMING_SNAKE_CASE : int = CLIPTextModel(_A ) __SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_A ) __SCREAMING_SNAKE_CASE : List[str] = CLIPTextModelWithProjection(_A ) __SCREAMING_SNAKE_CASE : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=_A ) __SCREAMING_SNAKE_CASE : int = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''text_encoder_2''': text_encoder_a, '''tokenizer_2''': tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def UpperCAmelCase__ ( self : Any , _A : Union[str, Any] , _A : Tuple=0 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A ) __SCREAMING_SNAKE_CASE : Tuple = image / 2 + 0.5 if str(_A ).startswith('''mps''' ): 
__SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(_A ) else: __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=_A ).manual_seed(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 5.0, '''output_type''': '''numpy''', '''strength''': 0.75, } return inputs def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionXLImgaImgPipeline(**_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_A ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : Tuple = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : int ): """simple docstring""" super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" pass def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**_A ) __SCREAMING_SNAKE_CASE : List[str] = sd_pipe.to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(_A ) sd_pipe.set_progress_bar_config(disable=_A ) # forward without prompt embeds __SCREAMING_SNAKE_CASE : str = self.get_dummy_inputs(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = 3 * ['''this is a negative prompt'''] __SCREAMING_SNAKE_CASE : Union[str, Any] = negative_prompt __SCREAMING_SNAKE_CASE : List[Any] = 3 * [inputs['''prompt''']] __SCREAMING_SNAKE_CASE : Optional[int] = sd_pipe(**_A ) __SCREAMING_SNAKE_CASE : int = output.images[0, -3:, -3:, -1] # forward with prompt embeds __SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_inputs(_A ) __SCREAMING_SNAKE_CASE : Dict = 3 * ['''this is a negative prompt'''] __SCREAMING_SNAKE_CASE : str = 3 * [inputs.pop('''prompt''' )] ( ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ( __SCREAMING_SNAKE_CASE ), ) : Tuple = sd_pipe.encode_prompt(_A , negative_prompt=_A ) __SCREAMING_SNAKE_CASE : Dict = sd_pipe( **_A , prompt_embeds=_A , negative_prompt_embeds=_A , pooled_prompt_embeds=_A , negative_pooled_prompt_embeds=_A , ) __SCREAMING_SNAKE_CASE : str = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : str , _A : Union[str, Any] , _A : Tuple="cpu" , _A : str=torch.floataa , _A : List[str]=0 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=_A 
).manual_seed(_A ) __SCREAMING_SNAKE_CASE : Dict = np.random.RandomState(_A ).standard_normal((1, 4, 64, 64) ) __SCREAMING_SNAKE_CASE : str = torch.from_numpy(_A ).to(device=_A , dtype=_A ) __SCREAMING_SNAKE_CASE : Tuple = { '''prompt''': '''a photograph of an astronaut riding a horse''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''output_type''': '''numpy''', } return inputs def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' ) pipe.to(_A ) pipe.set_progress_bar_config(disable=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = pipe(**_A ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : Tuple = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
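# A sketch of the positional remapping the converter above relies on: both
# state dicts must enumerate their tensors in the same order, so old keys are
# simply zipped onto new keys. Names below are illustrative.
from collections import OrderedDict as _OrderedDictSketch

def _remap_state_dict_sketch(src_state_dict, dst_keys):
    remapped = _OrderedDictSketch()
    for dst_key, (_src_key, tensor) in zip(dst_keys, src_state_dict.items()):
        remapped[dst_key] = tensor  # same position, new name
    return remapped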
class __UpperCamelCase : """simple docstring""" def __init__( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : dict[str, TrieNode] = {} # Mapping from char to TrieNode __SCREAMING_SNAKE_CASE : List[str] = False def UpperCAmelCase__ ( self : Dict , _A : list[str] ): """simple docstring""" for word in words: self.insert(_A ) def UpperCAmelCase__ ( self : Optional[Any] , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self for char in word: if char not in curr.nodes: __SCREAMING_SNAKE_CASE : str = TrieNode() __SCREAMING_SNAKE_CASE : Optional[int] = curr.nodes[char] __SCREAMING_SNAKE_CASE : Optional[int] = True def UpperCAmelCase__ ( self : int , _A : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self for char in word: if char not in curr.nodes: return False __SCREAMING_SNAKE_CASE : int = curr.nodes[char] return curr.is_leaf def UpperCAmelCase__ ( self : List[str] , _A : str ): """simple docstring""" def _delete(_A : TrieNode , _A : str , _A : int ) -> bool: if index == len(_A ): # If word does not exist if not curr.is_leaf: return False __SCREAMING_SNAKE_CASE : Dict = False return len(curr.nodes ) == 0 __SCREAMING_SNAKE_CASE : Optional[int] = word[index] __SCREAMING_SNAKE_CASE : Optional[int] = curr.nodes.get(_A ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted __SCREAMING_SNAKE_CASE : List[str] = _delete(_A , _A , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , _A , 0 ) def a__ ( snake_case , snake_case ): """simple docstring""" if node.is_leaf: print(snake_case , end=''' ''' ) for key, value in node.nodes.items(): print_words(snake_case , word + key ) def a__ ( ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = '''banana bananas bandana band apple all beast'''.split() __SCREAMING_SNAKE_CASE : Any = TrieNode() root.insert_many(snake_case ) # print_words(root, "") assert all(root.find(snake_case ) for word in words ) assert root.find('''banana''' ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) assert root.find('''apple''' ) assert root.find('''all''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def a__ ( snake_case , snake_case ): """simple docstring""" print(str(snake_case ) , '''works!''' if passes else '''doesn\'t work :(''' ) def a__ ( ): """simple docstring""" assert test_trie() def a__ ( ): """simple docstring""" print_results('''Testing trie functionality''' , test_trie() ) if __name__ == "__main__": main()
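# An optional extension sketch, assuming the helpers above keep their original
# names (the node class as `TrieNode`, the printer as `print_words`). The trie
# answers exact membership; prefix search is the natural companion: walk the
# `nodes` mapping to the end of the prefix, then enumerate the subtree with
# the existing printer.
def words_with_prefix(root, prefix):
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return  # no stored word starts with this prefix
        curr = curr.nodes[char]
    print_words(curr, prefix)  # e.g. prefix "banan" -> "banana bananas" for the demo list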
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowercase_ = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
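# A background sketch of the idea behind _LazyModule: replacing the module in
# sys.modules defers the heavy torch-dependent imports until an exported name
# is first touched. A minimal stand-in using module-level __getattr__
# (PEP 562); names here are illustrative only.
#
# import importlib
#
# _lazy_exports = {"FalconConfig": ".configuration_falcon"}
#
# def __getattr__(name):
#     if name in _lazy_exports:
#         module = importlib.import_module(_lazy_exports[name], __package__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")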
def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def a__ ( snake_case = 100 ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = 1 __SCREAMING_SNAKE_CASE : int = 2 for i in range(2 , max_n + 1 ): __SCREAMING_SNAKE_CASE : Optional[Any] = pre_numerator __SCREAMING_SNAKE_CASE : Optional[int] = 2 * i // 3 if i % 3 == 0 else 1 __SCREAMING_SNAKE_CASE : Dict = cur_numerator __SCREAMING_SNAKE_CASE : Dict = e_cont * pre_numerator + temp return sum_digits(snake_case ) if __name__ == "__main__": print(f'''{solution() = }''')
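# A worked check, assuming the helpers above are the digit-sum function and
# the Project Euler 65 `solution`. The recurrence h_n = a_n * h_{n-1} + h_{n-2}
# with the e-expansion terms a = [1, 2, 1, 1, 4, 1, 1, 6, 1, ...]
# (a_i = 2i/3 when 3 divides i, else 1) gives numerators
# 2, 3, 8, 11, 19, 87, 106, 193, 1264, 1457, ...
# so solution(10) should return sum_digits(1457) = 1 + 4 + 5 + 7 = 17,
# matching the value quoted in the problem statement.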
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
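# The warnings.txt layout the parser above expects, for reference: pytest's
# warnings summary indents each warning body under a non-indented test id, and
# the loop buffers indented lines, flushing the buffer at the next
# non-indented line. A hypothetical fragment that would be kept under the
# default targets:
#
# tests/test_foo.py::test_bar
#   /usr/lib/python3.8/site-packages/pkg/mod.py:12: DeprecationWarning: x is deprecated
#     warnings.warn("x is deprecated", DeprecationWarning)
#
# The joined buffer contains ": DeprecationWarning: ", so the `targets` check
# matches and the warning is added to `selected_warnings`.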
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''pixel_values'''] def __init__( self : str , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : bool = True , **_A : Dict , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''height''': 384, '''width''': 384} __SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = do_resize __SCREAMING_SNAKE_CASE : List[Any] = size __SCREAMING_SNAKE_CASE : Optional[int] = resample __SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale __SCREAMING_SNAKE_CASE : str = rescale_factor __SCREAMING_SNAKE_CASE : List[str] = do_normalize __SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __SCREAMING_SNAKE_CASE : Dict = do_convert_rgb def UpperCAmelCase__ ( self : int , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[Any] , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(_A , default_to_square=_A ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}''' ) __SCREAMING_SNAKE_CASE : int = (size['''height'''], size['''width''']) return resize(_A , size=_A , resample=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : Dict , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ): """simple docstring""" return rescale(_A , scale=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : Union[str, Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ): """simple docstring""" return normalize(_A , mean=_A , std=_A , data_format=_A , **_A ) def UpperCAmelCase__ ( self : int , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : bool = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : int , ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = do_resize if do_resize is not None else self.do_resize __SCREAMING_SNAKE_CASE : List[str] = resample if resample is not None else self.resample __SCREAMING_SNAKE_CASE : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale __SCREAMING_SNAKE_CASE : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor __SCREAMING_SNAKE_CASE : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __SCREAMING_SNAKE_CASE : Optional[int] = image_mean if image_mean is not None else self.image_mean __SCREAMING_SNAKE_CASE : str = image_std if image_std is not None else self.image_std __SCREAMING_SNAKE_CASE : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __SCREAMING_SNAKE_CASE : Any = size if size is not None else self.size __SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(_A , default_to_square=_A ) __SCREAMING_SNAKE_CASE : Any = make_list_of_images(_A ) if not valid_images(_A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __SCREAMING_SNAKE_CASE : Optional[Any] = [convert_to_rgb(_A ) for image in images] # All transformations expect numpy arrays. __SCREAMING_SNAKE_CASE : Dict = [to_numpy_array(_A ) for image in images] if do_resize: __SCREAMING_SNAKE_CASE : List[Any] = [self.resize(image=_A , size=_A , resample=_A ) for image in images] if do_rescale: __SCREAMING_SNAKE_CASE : Union[str, Any] = [self.rescale(image=_A , scale=_A ) for image in images] if do_normalize: __SCREAMING_SNAKE_CASE : Tuple = [self.normalize(image=_A , mean=_A , std=_A ) for image in images] __SCREAMING_SNAKE_CASE : int = [to_channel_dimension_format(_A , _A ) for image in images] __SCREAMING_SNAKE_CASE : Union[str, Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=_A ) return encoded_outputs
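# A numeric sketch of the pixel pipeline above: after the 384x384 resize,
# pixels are rescaled by 1/255 and normalized channel-wise. The mean/std
# literals below are the values transformers ships as OPENAI_CLIP_MEAN and
# OPENAI_CLIP_STD; the toy image is channels-last.
#
# import numpy as np
# clip_mean = np.array([0.48145466, 0.4578275, 0.40821073])
# clip_std = np.array([0.26862954, 0.26130258, 0.27577711])
# image = np.random.randint(0, 256, size=(384, 384, 3)).astype(np.float32)
# normalized = (image * (1 / 255) - clip_mean) / clip_std  # rescale, then normalize
# print(normalized.shape)  # (384, 384, 3); channel order is fixed afterwards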
from dataclasses import dataclass from typing import Optional import torch from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .modeling_utils import ModelMixin @dataclass class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = 42 class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" @register_to_config def __init__( self : Dict , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : str = "geglu" , _A : bool = True , _A : bool = True , ): """simple docstring""" super().__init__() __SCREAMING_SNAKE_CASE : Dict = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[int] = attention_head_dim __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim __SCREAMING_SNAKE_CASE : Tuple = in_channels __SCREAMING_SNAKE_CASE : str = torch.nn.GroupNorm(num_groups=_A , num_channels=_A , eps=1e-6 , affine=_A ) __SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(_A , _A ) # 3. Define transformers blocks __SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList( [ BasicTransformerBlock( _A , _A , _A , dropout=_A , cross_attention_dim=_A , activation_fn=_A , attention_bias=_A , double_self_attention=_A , norm_elementwise_affine=_A , ) for d in range(_A ) ] ) __SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(_A , _A ) def UpperCAmelCase__ ( self : str , _A : Dict , _A : int=None , _A : Tuple=None , _A : Dict=None , _A : List[Any]=1 , _A : Union[str, Any]=None , _A : bool = True , ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.shape __SCREAMING_SNAKE_CASE : Any = batch_frames // num_frames __SCREAMING_SNAKE_CASE : Dict = hidden_states __SCREAMING_SNAKE_CASE : str = hidden_states[None, :].reshape(_A , _A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = hidden_states.permute(0 , 2 , 1 , 3 , 4 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm(_A ) __SCREAMING_SNAKE_CASE : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , _A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(_A ) # 2. Blocks for block in self.transformer_blocks: __SCREAMING_SNAKE_CASE : Optional[Any] = block( _A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , class_labels=_A , ) # 3. Output __SCREAMING_SNAKE_CASE : Any = self.proj_out(_A ) __SCREAMING_SNAKE_CASE : List[str] = ( hidden_states[None, None, :] .reshape(_A , _A , _A , _A , _A ) .permute(0 , 3 , 4 , 1 , 2 ) .contiguous() ) __SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states.reshape(_A , _A , _A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_states + residual if not return_dict: return (output,) return TransformerTemporalModelOutput(sample=_A )
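# A shape walkthrough for the forward pass above: the temporal blocks attend
# over frames, so the (batch*frames, C, H, W) input is folded into one
# length-`num_frames` sequence per spatial location. Standalone check with
# made-up sizes:
def _temporal_reshape_sketch():
    batch_size, num_frames, channels, height, width = 2, 8, 16, 4, 4
    hidden_states = torch.randn(batch_size * num_frames, channels, height, width)
    x = hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width)
    x = x.permute(0, 2, 1, 3, 4)  # (B, C, F, H, W): the layout GroupNorm sees
    x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channels)
    assert x.shape == (32, 8, 16)  # one frame-sequence per pixel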
def a__ ( snake_case = 10 , snake_case = 22 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = range(1 , snake_case ) __SCREAMING_SNAKE_CASE : List[Any] = range(1 , snake_case ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'''{solution(10, 22) = }''')
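# Worked example for the count above: a (base, power) pair is counted when
# base**power has exactly `power` digits, e.g. 4**2 = 16 (2 digits) counts,
# while 10**2 = 100 (3 digits) does not.
assert len(str(4**2)) == 2
assert len(str(10**2)) == 3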
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
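# A quick illustration (hypothetical class names) of the comment convention the
# checker above enforces, and what the `Copied from` regex extracts from it:
import re

_re_copy = re.compile(R"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
line = "    # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock"
print(_re_copy.search(line).groups())
# ('    ', 'models.attention.BasicTransformerBlock', 'with BasicTransformerBlock->MyBlock')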
from ..utils import DummyObject, requires_backends class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : str , *_A : Union[str, Any] , **_A : List[str] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *_A : int , **_A : int ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Tuple , *_A : List[str] , **_A : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Optional[Any] , *_A : Any , **_A : Any ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *_A : Dict , **_A : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Optional[Any] , *_A : List[str] , **_A : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Optional[int] , *_A : int , **_A : str ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Optional[int] , *_A : Tuple , **_A : Dict ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : str , *_A : Union[str, Any] , **_A : str ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : str , *_A : Dict , **_A : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Any , *_A : Tuple , **_A : Optional[int] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : List[Any] , *_A : Any , **_A : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : str , *_A : Optional[int] , **_A : Optional[int] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( 
self : Dict , *_A : Tuple , **_A : List[Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Optional[Any] , *_A : Dict , **_A : Tuple ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Tuple , *_A : Union[str, Any] , **_A : str ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Any , *_A : Optional[Any] , **_A : Optional[int] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Any , *_A : Optional[Any] , **_A : str ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Optional[int] , *_A : Optional[int] , **_A : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Any , *_A : Tuple , **_A : Optional[Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Dict , *_A : Dict , **_A : int ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Union[str, Any] , *_A : List[Any] , **_A : Optional[Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Optional[Any] , *_A : Tuple , **_A : List[str] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : int , *_A : Tuple , **_A : Any ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Any , *_A : Tuple , **_A : Tuple ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Dict , *_A : Optional[int] , **_A : List[str] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Optional[int] , *_A : int , **_A : Dict ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" 
lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Any , *_A : Any , **_A : Any ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : List[str] , *_A : Optional[Any] , **_A : int ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : Any , *_A : Optional[int] , **_A : Union[str, Any] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] ) class __UpperCamelCase ( metaclass=lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = ['''sentencepiece'''] def __init__( self : str , *_A : int , **_A : List[str] ): """simple docstring""" requires_backends(self , ['''sentencepiece'''] )
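# The dummy classes above all follow one pattern: importing them always works,
# but instantiating them raises a helpful error when the backend is missing.
# A minimal standalone sketch of that guard (names are illustrative, not the
# library implementation):
import importlib.util


def _backend_available(backend):
    return importlib.util.find_spec(backend) is not None


def requires_backends(obj, backends):
    missing = [backend for backend in backends if not _backend_available(backend)]
    if missing:
        raise ImportError(
            F'''{type(obj).__name__} requires the following backends: {', '.join(missing)}'''
        )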
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" super().tearDown() gc.collect() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : Optional[Any] = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : int = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Tuple = replicate(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = shard(_A ) __SCREAMING_SNAKE_CASE : Dict = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : str = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Tuple = jnp.array([0.42_38, 0.44_14, 0.43_95, 0.44_53, 0.46_29, 0.45_90, 0.45_31, 0.4_55_08, 0.45_12] ) print(F'''output_slice: {output_slice}''' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = '''stabilityai/stable-diffusion-2''' __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = FlaxDPMSolverMultistepScheduler.from_pretrained(_A , subfolder='''scheduler''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( _A , scheduler=_A , revision='''bf16''' , dtype=jnp.bfloataa , ) __SCREAMING_SNAKE_CASE : List[str] = scheduler_params __SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger''' __SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() __SCREAMING_SNAKE_CASE : Tuple = num_samples * [prompt] __SCREAMING_SNAKE_CASE : Any = sd_pipe.prepare_inputs(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A ) __SCREAMING_SNAKE_CASE : List[str] = shard(_A ) __SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , jax.device_count() ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(_A , _A , _A , num_inference_steps=25 , jit=_A )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) __SCREAMING_SNAKE_CASE : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __SCREAMING_SNAKE_CASE : Dict = images[0, 253:256, 253:256, -1] __SCREAMING_SNAKE_CASE : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.array([0.43_36, 0.4_29_69, 0.44_53, 0.41_99, 0.42_97, 0.45_31, 0.44_34, 0.44_34, 0.42_97] ) print(F'''output_slice: {output_slice}''' ) 
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
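# A standalone sketch of the replicate/shard data layout the tests above rely
# on: parameters are replicated to every device, while inputs gain a leading
# device axis so each device processes its own slice under `pmap`.
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard

global_batch = jnp.zeros((jax.device_count() * 2, 8))
sharded = shard(global_batch)
print(sharded.shape)  # (num_devices, 2, 8) on a single host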
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = IFPipeline lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {'''latents'''} def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self._get_dummy_components() def UpperCAmelCase__ ( self : Optional[int] , _A : Dict , _A : Dict=0 ): """simple docstring""" if str(_A ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(_A ) else: __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device=_A ).manual_seed(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" super().test_save_load_floataa(expected_max_diff=1e-1 ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self._test_save_load_local() def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : Any ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : int = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=_A , tokenizer=_A ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() __SCREAMING_SNAKE_CASE : 
List[Any] = None __SCREAMING_SNAKE_CASE : str = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(_A , _A , _A , _A ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img __SCREAMING_SNAKE_CASE : Dict = IFImgaImgPipeline(**pipe_a.components ) __SCREAMING_SNAKE_CASE : int = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(_A , _A , _A , _A ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting __SCREAMING_SNAKE_CASE : int = IFInpaintingPipeline(**pipe_a.components ) __SCREAMING_SNAKE_CASE : Dict = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(_A , _A , _A , _A ) def UpperCAmelCase__ ( self : Optional[Any] , _A : str , _A : Optional[Any] , _A : Tuple , _A : List[str] ): """simple docstring""" _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , num_inference_steps=2 , generator=_A , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : int = output.images[0] assert image.shape == (64, 64, 3) __SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 __SCREAMING_SNAKE_CASE : List[str] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(_A , _A ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : int = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : int = output.images[0] assert image.shape == (256, 256, 3) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __SCREAMING_SNAKE_CASE : Optional[int] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(_A , _A ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : List[Any] , _A : Optional[int] , _A : Union[str, Any] ): """simple docstring""" _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[Any] = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , num_inference_steps=2 , generator=_A , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Any = output.images[0] assert image.shape == (64, 64, 3) __SCREAMING_SNAKE_CASE : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 __SCREAMING_SNAKE_CASE : Any = load_numpy( 
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) assert_mean_pixel_difference(_A , _A ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Tuple = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , original_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : List[Any] = output.images[0] assert image.shape == (256, 256, 3) __SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __SCREAMING_SNAKE_CASE : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] , _A : List[str] , _A : List[str] , _A : Any , _A : Dict ): """simple docstring""" _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(_A ) __SCREAMING_SNAKE_CASE : int = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Tuple = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , mask_image=_A , num_inference_steps=2 , generator=_A , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : List[Any] = output.images[0] assert image.shape == (64, 64, 3) __SCREAMING_SNAKE_CASE : int = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 __SCREAMING_SNAKE_CASE : Dict = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(_A , _A ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = pipe_a( prompt_embeds=_A , negative_prompt_embeds=_A , image=_A , mask_image=_A , original_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] assert image.shape == (256, 256, 3) __SCREAMING_SNAKE_CASE : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 __SCREAMING_SNAKE_CASE : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(_A , _A ) def a__ ( ): """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase_ = { """configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""], """processing_layoutlmv2""": ["""LayoutLMv2Processor"""], """tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2TokenizerFast"""] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["""LayoutLMv2FeatureExtractor"""] lowercase_ = ["""LayoutLMv2ImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ """LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""", """LayoutLMv2ForQuestionAnswering""", """LayoutLMv2ForSequenceClassification""", """LayoutLMv2ForTokenClassification""", """LayoutLMv2Layer""", """LayoutLMv2Model""", """LayoutLMv2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaLayer, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
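# A compact standalone sketch of the lazy-import pattern behind `_LazyModule`
# above: attribute access triggers the submodule import on demand instead of
# importing every backend-heavy module eagerly (names are illustrative).
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._object_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        if attr not in self._object_to_module:
            raise AttributeError(F'''module {self.__name__} has no attribute {attr}''')
        module = importlib.import_module("." + self._object_to_module[attr], self.__name__)
        return getattr(module, attr)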
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def __lowercase ( snake_case, snake_case, snake_case ): """simple docstring""" __magic_name__ :str = AutoConfig.from_pretrained(snake_case ) __magic_name__ :Dict = FlaxAutoModelForSeqaSeqLM.from_config(config=snake_case ) __magic_name__ :Any = checkpoints.load_tax_checkpoint(snake_case ) __magic_name__ :List[str] = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": __magic_name__ :Tuple = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": __magic_name__ :Optional[int] = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __magic_name__ :Any = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): __magic_name__ :Union[str, Any] = f'''layers_{str(snake_case )}''' # Self-Attention __magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] __magic_name__ :str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] __magic_name__ :str = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] __magic_name__ :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization __magic_name__ :Any = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: __magic_name__ :Any = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] __magic_name__ :Tuple = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: __magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] __magic_name__ :List[str] = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization __magic_name__ :Union[str, Any] = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning __magic_name__ :Optional[int] = flax_model.params['''encoder''']['''block'''][str(snake_case )]['''layer'''] __magic_name__ :List[Any] = tax_attention_key __magic_name__ :List[str] = tax_attention_out __magic_name__ :Optional[int] = tax_attention_query __magic_name__ :str = tax_attention_value __magic_name__ :Dict = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __magic_name__ :Any = tax_global_layer_norm if split_mlp_wi: __magic_name__ :str = tax_mlp_wi_a __magic_name__ :Dict = tax_mlp_wi_a else: __magic_name__ :Tuple = tax_mlp_wi __magic_name__ :Optional[int] = tax_mlp_wo __magic_name__ :Optional[int] = tax_mlp_layer_norm __magic_name__ :Any = flax_model_encoder_layer_block # Only for layer 0: __magic_name__ :Dict = 
tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T __magic_name__ :List[Any] = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": __magic_name__ :Any = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T __magic_name__ :Dict = tax_encoder_global_rel_embedding # Assigning __magic_name__ :Union[str, Any] = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] __magic_name__ :List[str] = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): __magic_name__ :List[Any] = f'''layers_{str(snake_case )}''' # Self-Attention __magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] __magic_name__ :str = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] __magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] __magic_name__ :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization __magic_name__ :Optional[int] = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention __magic_name__ :Union[str, Any] = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] __magic_name__ :Tuple = tax_enc_dec_attention_module['''key''']['''kernel'''] __magic_name__ :Optional[int] = tax_enc_dec_attention_module['''out''']['''kernel'''] __magic_name__ :List[str] = tax_enc_dec_attention_module['''query''']['''kernel'''] __magic_name__ :Tuple = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization __magic_name__ :int = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: __magic_name__ :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] __magic_name__ :Dict = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: __magic_name__ :int = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] __magic_name__ :Optional[Any] = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization __magic_name__ :Dict = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning __magic_name__ :List[str] = flax_model.params['''decoder''']['''block'''][str(snake_case )]['''layer'''] __magic_name__ :Any = tax_attention_key __magic_name__ :List[str] = tax_attention_out __magic_name__ :Tuple = tax_attention_query __magic_name__ :Tuple = tax_attention_value __magic_name__ :Tuple = tax_pre_attention_layer_norm __magic_name__ :Optional[Any] = tax_enc_dec_attention_key __magic_name__ :str = tax_enc_dec_attention_out __magic_name__ :Union[str, Any] = tax_enc_dec_attention_query __magic_name__ :Any = tax_enc_dec_attention_value __magic_name__ :Tuple = tax_cross_layer_norm if split_mlp_wi: __magic_name__ :Optional[int] = tax_mlp_wi_a __magic_name__ :Union[str, Any] = tax_mlp_wi_a else: __magic_name__ :Optional[int] = tax_mlp_wi __magic_name__ :List[str] = tax_mlp_wo __magic_name__ :int = txa_mlp_layer_norm __magic_name__ :str = flax_model_decoder_layer_block # 
Decoder Normalization __magic_name__ :Optional[Any] = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] __magic_name__ :Tuple = txa_decoder_norm # Only for layer 0: __magic_name__ :Optional[Any] = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T __magic_name__ :str = tax_decoder_rel_embedding # Token Embeddings __magic_name__ :List[Any] = tax_model['''target''']['''token_embedder''']['''embedding'''] __magic_name__ :Any = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: __magic_name__ :int = tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(snake_case ) print('''T5X Model was sucessfully converted!''' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path the T5X checkpoint.""" ) parser.add_argument("""--config_name""", default=None, type=str, required=True, help="""Config name of LongT5/T5 model.""") parser.add_argument( """--flax_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output FLAX model.""" ) SCREAMING_SNAKE_CASE__ : Optional[int] = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
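# A small standalone helper (illustrative, not part of the script above) for
# inspecting nested checkpoint trees like the T5X `target` dictionary walked
# above: it flattens the tree into slash-separated parameter paths.
def flatten_params(tree, prefix=""):
    flat = {}
    for key, value in tree.items():
        path = F'''{prefix}/{key}''' if prefix else key
        if isinstance(value, dict):
            flat.update(flatten_params(value, path))
        else:
            flat[path] = value
    return flat


print(flatten_params({"encoder": {"layers_0": {"mlp": {"wi": "..."}}}}))
# {'encoder/layers_0/mlp/wi': '...'}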
import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileBertTokenizer lowerCAmelCase_ = MobileBertTokenizerFast lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = filter_non_english lowerCAmelCase_ = '''google/mobilebert-uncased''' def UpperCAmelCase__ ( self : Dict ): """simple docstring""" super().setUp() __SCREAMING_SNAKE_CASE : List[str] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] __SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __SCREAMING_SNAKE_CASE : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running''' return input_text, output_text def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' ) self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase__ ( self : int ): """simple docstring""" if not self.test_rust_tokenizer: return __SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) # With lower casing __SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A ) __SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running''' __SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) 
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer() __SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] ) self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A ) self.assertListEqual( tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] ) self.assertListEqual( tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? 
[UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing'''] __SCREAMING_SNAKE_CASE : Dict = {} for i, token in enumerate(_A ): __SCREAMING_SNAKE_CASE : List[str] = i __SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' ) self.assertListEqual(tokenizer.tokenize('''''' ) , [] ) self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] ) self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] ) def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" self.assertTrue(_is_whitespace(''' ''' ) ) self.assertTrue(_is_whitespace('''\t''' ) ) self.assertTrue(_is_whitespace('''\r''' ) ) self.assertTrue(_is_whitespace('''\n''' ) ) self.assertTrue(_is_whitespace('''\u00A0''' ) ) self.assertFalse(_is_whitespace('''A''' ) ) self.assertFalse(_is_whitespace('''-''' ) ) def UpperCAmelCase__ ( self : str ): """simple docstring""" self.assertTrue(_is_control('''\u0005''' ) ) self.assertFalse(_is_control('''A''' ) ) self.assertFalse(_is_control(''' ''' ) ) self.assertFalse(_is_control('''\t''' ) ) self.assertFalse(_is_control('''\r''' ) ) def UpperCAmelCase__ ( self : Any ): """simple docstring""" self.assertTrue(_is_punctuation('''-''' ) ) self.assertTrue(_is_punctuation('''$''' ) ) self.assertTrue(_is_punctuation('''`''' ) ) self.assertTrue(_is_punctuation('''.''' ) ) self.assertFalse(_is_punctuation('''A''' ) ) self.assertFalse(_is_punctuation(''' ''' ) ) def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) self.assertListEqual( [rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] ) @slow def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A ) __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' __SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus( _A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , 
add_special_tokens=_A , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False __SCREAMING_SNAKE_CASE : Optional[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''A'''), ((1, 2), ''','''), ((3, 5), '''na'''), ((5, 6), '''##ï'''), ((6, 8), '''##ve'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''Allen'''), ((21, 23), '''##NL'''), ((23, 24), '''##P'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), '''a'''), ((1, 2), ''','''), ((3, 8), '''naive'''), ((9, 15), tokenizer_r.mask_token), ((16, 21), '''allen'''), ((21, 23), '''##nl'''), ((23, 24), '''##p'''), ((25, 33), '''sentence'''), ((33, 34), '''.'''), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) ) self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有'''] __SCREAMING_SNAKE_CASE : int = ''''''.join(_A ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __SCREAMING_SNAKE_CASE : str = True __SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A ) __SCREAMING_SNAKE_CASE : Optional[Any] = False __SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A ) __SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A ) __SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A ) __SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A ) # it is expected that only the first Chinese character is not preceded by "##". __SCREAMING_SNAKE_CASE : List[Any] = [ F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A ) ] self.assertListEqual(_A , _A ) self.assertListEqual(_A , _A )
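# A standalone sketch of the greedy longest-match-first WordPiece algorithm the
# tests above exercise (illustrative, not the library implementation):
def wordpiece(token, vocab, unk_token="[UNK]"):
    pieces, start = [], 0
    while start < len(token):
        end, cur_piece = len(token), None
        while start < end:
            piece = token[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur_piece = piece
                break
            end -= 1
        if cur_piece is None:
            return [unk_token]
        pieces.append(cur_piece)
        start = end
    return pieces


print(wordpiece("unwanted", {"un", "##want", "##ed"}))  # ['un', '##want', '##ed']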
import random import sys import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap __snake_case = '''Usage of script: script_name <size_of_canvas:int>''' __snake_case = [0] * 1_0_0 + [1] * 1_0 random.shuffle(choice) def _A ( _lowercase ) -> list[list[bool]]: """simple docstring""" __UpperCamelCase = [[False for i in range(_lowercase )] for j in range(_lowercase )] return canvas def _A ( _lowercase ) -> None: """simple docstring""" for i, row in enumerate(_lowercase ): for j, _ in enumerate(_lowercase ): __UpperCamelCase = bool(random.getrandbits(1 ) ) def _A ( _lowercase ) -> list[list[bool]]: """simple docstring""" __UpperCamelCase = np.array(_lowercase ) __UpperCamelCase = np.array(create_canvas(current_canvas.shape[0] ) ) for r, row in enumerate(_lowercase ): for c, pt in enumerate(_lowercase ): __UpperCamelCase = __judge_point( _lowercase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] ) __UpperCamelCase = next_gen_canvas del next_gen_canvas # cleaning memory as we move on. __UpperCamelCase = current_canvas.tolist() return return_canvas def _A ( _lowercase , _lowercase ) -> bool: """simple docstring""" __UpperCamelCase = 0 __UpperCamelCase = 0 # finding dead or alive neighbours count. for i in neighbours: for status in i: if status: alive += 1 else: dead += 1 # handling duplicate entry for focus pt. if pt: alive -= 1 else: dead -= 1 # running the rules of game here. __UpperCamelCase = pt if pt: if alive < 2: __UpperCamelCase = False elif alive == 2 or alive == 3: __UpperCamelCase = True elif alive > 3: __UpperCamelCase = False else: if alive == 3: __UpperCamelCase = True return state if __name__ == "__main__": if len(sys.argv) != 2: raise Exception(usage_doc) __snake_case = int(sys.argv[1]) # main working structure of this module. __snake_case = create_canvas(canvas_size) seed(c) __snake_case , __snake_case = plt.subplots() fig.show() __snake_case = ListedColormap(['''w''', '''k''']) try: while True: __snake_case = run(c) ax.matshow(c, cmap=cmap) fig.canvas.draw() ax.cla() except KeyboardInterrupt: # do nothing. pass
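# The per-cell update applied above, restated as a compact standalone rule
# (Conway's Game of Life): a live cell survives with 2 or 3 live neighbours,
# and a dead cell becomes alive with exactly 3.
def next_state(is_alive, alive_neighbours):
    if is_alive:
        return alive_neighbours in (2, 3)
    return alive_neighbours == 3


assert next_state(True, 1) is False   # underpopulation
assert next_state(True, 3) is True    # survival
assert next_state(False, 3) is True   # reproduction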
import warnings from ...utils import logging from .image_processing_mobilevit import MobileViTImageProcessor lowercase_ = logging.get_logger(__name__) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , *_A : Optional[int] , **_A : Tuple ): """simple docstring""" warnings.warn( '''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use MobileViTImageProcessor instead.''' , _A , ) super().__init__(*_A , **_A )
from __future__ import annotations import copy import tempfile import unittest from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available from transformers.testing_utils import ( DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, RequestCounter, require_tensorflow_probability, require_tf, slow, ) from ..bert.test_modeling_bert import BertModelTester if is_tf_available(): from transformers import ( TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTableQuestionAnswering, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFFunnelBaseModel, TFFunnelModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, TFTapasForQuestionAnswering, ) from transformers.models.auto.modeling_tf_auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__ ( _A): """simple docstring""" a__ : Dict = "new-model" if is_tf_available(): class lowerCamelCase__ ( _A): """simple docstring""" a__ : Optional[Any] = NewModelConfig @require_tf class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" @slow def snake_case_ ( self : Optional[int] ) -> List[str]: _A = '''bert-base-cased''' _A = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = TFAutoModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def snake_case_ ( self : Union[str, Any] ) -> Dict: _A = '''bert-base-cased''' _A = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = TFAutoModelForPreTraining.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def snake_case_ ( self : List[Any] ) -> Tuple: for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase ) _A , _A = TFAutoModelForCausalLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def snake_case_ ( self : str ) -> Optional[Any]: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = 
TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def snake_case_ ( self : List[str] ) -> Union[str, Any]: for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase ) _A , _A = TFAutoModelForMaskedLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def snake_case_ ( self : str ) -> Dict: for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _A = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase ) _A , _A = TFAutoModelForSeqaSeqLM.from_pretrained(__lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def snake_case_ ( self : int ) -> Dict: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _A = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = TFAutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow def snake_case_ ( self : Any ) -> Any: # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: _A = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = TFAutoModelForQuestionAnswering.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) @slow @require_tensorflow_probability def snake_case_ ( self : List[Any] ) -> str: for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: _A = AutoConfig.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = TFAutoModelForTableQuestionAnswering.from_pretrained(__lowerCAmelCase ) _A , _A = TFAutoModelForTableQuestionAnswering.from_pretrained( __lowerCAmelCase , output_loading_info=__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : str ) -> List[str]: _A = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCAmelCase ) , 1_44_10 ) def snake_case_ ( self : List[str] ) -> Tuple: _A = TFAutoModelWithLMHead.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=__lowerCAmelCase ) , 1_44_10 ) def snake_case_ ( self : Optional[int] ) -> Dict: # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel _A = 
TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) _A = copy.deepcopy(model.config ) _A = ['''FunnelBaseModel'''] _A = TFAutoModel.from_config(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCAmelCase ) _A = TFAutoModel.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) def snake_case_ ( self : List[str] ) -> Optional[int]: try: AutoConfig.register('''new-model''' , __lowerCAmelCase ) _A = [ TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, ] for auto_class in auto_classes: with self.subTest(auto_class.__name__ ): # Wrong config class will raise an error with self.assertRaises(__lowerCAmelCase ): auto_class.register(__lowerCAmelCase , __lowerCAmelCase ) auto_class.register(__lowerCAmelCase , __lowerCAmelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCAmelCase ): auto_class.register(__lowerCAmelCase , __lowerCAmelCase ) # Now that the config is registered, it can be used as any other config with the auto-API _A = BertModelTester(self ).get_config() _A = NewModelConfig(**tiny_config.to_dict() ) _A = auto_class.from_config(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__lowerCAmelCase ) _A = auto_class.from_pretrained(__lowerCAmelCase ) self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"] for mapping in ( TF_MODEL_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, ): if NewModelConfig in mapping._extra_content: del mapping._extra_content[NewModelConfig] def snake_case_ ( self : Optional[int] ) -> Optional[int]: with self.assertRaisesRegex( __lowerCAmelCase , '''bert-base is not a local folder and is not a valid model identifier''' ): _A = TFAutoModel.from_pretrained('''bert-base''' ) def snake_case_ ( self : int ) -> str: with self.assertRaisesRegex( __lowerCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _A = TFAutoModel.from_pretrained(__lowerCAmelCase , revision='''aaaaaa''' ) def snake_case_ ( self : Any ) -> List[str]: with self.assertRaisesRegex( __lowerCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ): _A = TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' ) def snake_case_ ( self : List[str] ) -> Any: with self.assertRaisesRegex(__lowerCAmelCase , '''Use `from_pt=True` to load this model''' ): _A = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' ) def snake_case_ ( self : List[Any] ) -> Dict: # Make sure we have cached the model. 
_A = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) with RequestCounter() as counter: _A = TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 ) # With a sharded checkpoint _A = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) with RequestCounter() as counter: _A = TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' ) self.assertEqual(counter.get_request_count , 0 ) self.assertEqual(counter.head_request_count , 1 ) self.assertEqual(counter.other_request_count , 0 )
2
import itertools
from dataclasses import dataclass
from typing import List, Optional

import pyarrow as pa
import pyarrow.parquet as pq

import datasets
from datasets.table import table_cast


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
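# --- illustrative usage sketch (added; the file name below is a placeholder) ---
# The builder above is what `datasets.load_dataset` dispatches to for "parquet".
import datasets

ds = datasets.load_dataset("parquet", data_files="data.parquet", split="train")
print(ds.features)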
74
0
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class SCREAMING_SNAKE_CASE__ : def __init__( self , A_ , A_=99 , A_=13 , A_=16 , A_=7 , A_=True , A_=True , A_=True , A_=False , A_=True , A_=2 , A_=32 , A_=4 , A_=4 , A_=30 , A_=0 , A_=1 , A_=2 , A_=None , )-> str: '''simple docstring''' UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = decoder_seq_length # For common tests UpperCamelCase = self.decoder_seq_length UpperCamelCase = is_training UpperCamelCase = use_attention_mask UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = d_model UpperCamelCase = d_model UpperCamelCase = decoder_layers UpperCamelCase = decoder_layers UpperCamelCase = decoder_ffn_dim UpperCamelCase = decoder_attention_heads UpperCamelCase = decoder_attention_heads UpperCamelCase = eos_token_id UpperCamelCase = bos_token_id UpperCamelCase = pad_token_id UpperCamelCase = decoder_start_token_id UpperCamelCase = use_cache UpperCamelCase = max_position_embeddings UpperCamelCase = None UpperCamelCase = decoder_seq_length UpperCamelCase = 2 UpperCamelCase = 1 def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' UpperCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_attention_mask: UpperCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) UpperCamelCase = None if self.use_labels: UpperCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) UpperCamelCase = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ , )-> Optional[int]: '''simple docstring''' UpperCamelCase = True UpperCamelCase = TrOCRDecoder(config=A_ ).to(A_ ).eval() UpperCamelCase = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass UpperCamelCase = model(A_ , use_cache=A_ ) UpperCamelCase = model(A_ ) UpperCamelCase = model(A_ , use_cache=A_ ) self.parent.assertTrue(len(A_ ) == len(A_ ) ) self.parent.assertTrue(len(A_ ) == len(A_ ) + 1 ) UpperCamelCase = outputs['past_key_values'] # create hypothetical next token and extent to next_input_ids UpperCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and UpperCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCamelCase = model(A_ )['last_hidden_state'] UpperCamelCase = model(A_ , past_key_values=A_ )['last_hidden_state'] # select random slice UpperCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() UpperCamelCase = 
output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(A_ , A_ , atol=1e-3 ) def UpperCAmelCase_ ( self )-> List[str]: '''simple docstring''' UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase): lowerCAmelCase_ = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () lowerCAmelCase_ = (TrOCRForCausalLM,) if is_torch_available() else () lowerCAmelCase_ = {"""text-generation""": TrOCRForCausalLM} if is_torch_available() else {} lowerCAmelCase_ = True lowerCAmelCase_ = False def UpperCAmelCase_ ( self )-> Tuple: '''simple docstring''' UpperCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=A_ ) UpperCamelCase = ConfigTester(self , config_class=A_ ) def UpperCAmelCase_ ( self )-> str: '''simple docstring''' pass def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' pass def UpperCAmelCase_ ( self )-> str: '''simple docstring''' pass def UpperCAmelCase_ ( self )-> Any: '''simple docstring''' self.config_tester.run_common_tests() def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' UpperCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*A_ ) def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' return @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' pass
3
from math import isclose, sqrt


def next_point(point_x: float, point_y: float, incoming_gradient: float) -> tuple[float, float, float]:
    # gradient of the normal at the point of incidence on the ellipse
    normal_gradient = point_y / 4 / point_x
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (1 + normal_gradient * normal_gradient)
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)
    x_plus = (-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)) / (2 * quadratic_term)

    # two solutions, one of which is our input point
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
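# --- illustrative sanity check (added; not part of the original solution) ---
# The entry point (1.4, -9.6) lies on the ellipse 4x^2 + y^2 = 100, and each
# bounce computed by next_point should stay on it, up to floating-point error.
assert abs(4 * 1.4**2 + (-9.6) ** 2 - 100) < 1e-9
x, y, gradient = next_point(1.4, -9.6, (10.1 - -9.6) / (0.0 - 1.4))
assert abs(4 * x * x + y * y - 100) < 1e-6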
74
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __UpperCamelCase : List[str] = None __UpperCamelCase : List[Any] = logging.get_logger(__name__) __UpperCamelCase : Tuple = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase : str = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase : Optional[int] = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __UpperCamelCase : Optional[Any] = '''▁''' class a ( a__ ): snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = ['''input_ids''', '''attention_mask'''] snake_case__ = BarthezTokenizer def __init__( self , _snake_case=None , _snake_case=None , _snake_case="<s>" , _snake_case="</s>" , _snake_case="</s>" , _snake_case="<s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<mask>" , **_snake_case , ): """simple docstring""" lowerCAmelCase = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token super().__init__( _snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , **_snake_case , ) lowerCAmelCase = vocab_file lowerCAmelCase = False if not self.vocab_file else True def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] lowerCAmelCase = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" lowerCAmelCase = [self.sep_token_id] lowerCAmelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCamelCase__ ( self , _snake_case , _snake_case = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' 
) if not os.path.isdir(_snake_case ): logger.error(F'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file , _snake_case ) return (out_vocab_file,)
4
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , _A : int , _A : Any=7 , _A : List[str]=3 , _A : Optional[Any]=18 , _A : List[str]=30 , _A : Optional[Any]=400 , _A : Any=True , _A : List[str]=None , _A : Union[str, Any]=True , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = size if size is not None else {'''shortest_edge''': 20} __SCREAMING_SNAKE_CASE : List[str] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} __SCREAMING_SNAKE_CASE : int = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : Optional[Any] = num_channels __SCREAMING_SNAKE_CASE : List[str] = image_size __SCREAMING_SNAKE_CASE : int = min_resolution __SCREAMING_SNAKE_CASE : Optional[int] = max_resolution __SCREAMING_SNAKE_CASE : List[Any] = do_resize __SCREAMING_SNAKE_CASE : Union[str, Any] = size __SCREAMING_SNAKE_CASE : str = do_center_crop __SCREAMING_SNAKE_CASE : Any = crop_size def UpperCAmelCase__ ( self : Dict ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, } @require_torch @require_vision class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = MobileNetVaImageProcessor if is_vision_available() else None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = MobileNetVaImageProcessingTester(self ) @property def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) self.assertTrue(hasattr(_A , '''do_center_crop''' ) ) self.assertTrue(hasattr(_A , '''crop_size''' ) ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) __SCREAMING_SNAKE_CASE : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def UpperCAmelCase__ ( self : int ): """simple docstring""" pass def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __SCREAMING_SNAKE_CASE : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input __SCREAMING_SNAKE_CASE : Dict = image_processing(image_inputs[0] 
, return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __SCREAMING_SNAKE_CASE : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input __SCREAMING_SNAKE_CASE : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Any = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __SCREAMING_SNAKE_CASE : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input __SCREAMING_SNAKE_CASE : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched __SCREAMING_SNAKE_CASE : Dict = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
74
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
5
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if the new index's result gives us a larger right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with the concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is at least the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
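# --- illustrative checks (added; not in the original file) ---
# "aba" starts at indices 0 and 4 of "abacaba", so find_pattern reports 2 matches.
assert find_pattern("aba", "abacaba") == 2
assert z_function("aaaa") == [0, 3, 2, 1]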
74
0
def temp_input_value(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int) and isinstance(max_val, int) and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
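# --- illustrative non-interactive run (added; main() above expects console input) ---
# Bisects the open range (0, 100) toward 37, printing each midpoint it tries.
guess_the_number(0, 100, 37)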
6
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
74
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) a = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
7
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
74
0
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union

from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings


logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self):
        # Serializes this instance, replacing nested `GenerationConfig` objects
        # by plain dictionaries for JSON serialization support.
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
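# --- illustrative usage sketch (added; output_dir is a placeholder) ---
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,
    generation_max_length=64,
    generation_num_beams=4,
)
assert args.to_dict()["generation_num_beams"] == 4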
8
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    # Write the first n lines of each file in src_dir to dest_dir/<same name>.
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
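# --- illustrative invocation (added; paths and script name are placeholders) ---
# Keep the first 100 lines of every file in source_docs/, writing to mini_docs/:
#     python minify.py source_docs mini_docs 100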
74
0
import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class __lowerCAmelCase : """simple docstring""" @property def _a ( self : Tuple ): """simple docstring""" return self.get_dummy_input() @property def _a ( self : List[str] ): """simple docstring""" if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' ) def _a ( self : List[Any] , _snake_case : List[Any]=True , _snake_case : int=False , _snake_case : List[str]=False , _snake_case : str=False , ): """simple docstring""" A__ = 4 A__ = 32 A__ = (32, 32) A__ = torch.manual_seed(0 ) A__ = torch.device(_snake_case ) A__ = (batch_size, num_channels) + sizes A__ = randn_tensor(_snake_case , generator=_snake_case , device=_snake_case ) A__ = {'hidden_states': hidden_states} if include_temb: A__ = 1_28 A__ = randn_tensor((batch_size, temb_channels) , generator=_snake_case , device=_snake_case ) if include_res_hidden_states_tuple: A__ = torch.manual_seed(1 ) A__ = (randn_tensor(_snake_case , generator=_snake_case , device=_snake_case ),) if include_encoder_hidden_states: A__ = floats_tensor((batch_size, 32, 32) ).to(_snake_case ) if include_skip_sample: A__ = randn_tensor(((batch_size, 3) + sizes) , generator=_snake_case , device=_snake_case ) return dummy_input def _a ( self : Dict ): """simple docstring""" A__ = { 'in_channels': 32, 'out_channels': 32, 'temb_channels': 1_28, } if self.block_type == "up": A__ = 32 if self.block_type == "mid": init_dict.pop('out_channels' ) A__ = self.dummy_input return init_dict, inputs_dict def _a ( self : Tuple , _snake_case : Optional[int] ): """simple docstring""" A__ , A__ = self.prepare_init_args_and_inputs_for_common() A__ = self.block_class(**_snake_case ) unet_block.to(_snake_case ) unet_block.eval() with torch.no_grad(): A__ = unet_block(**_snake_case ) if isinstance(_snake_case , _snake_case ): A__ = output[0] self.assertEqual(output.shape , self.output_shape ) A__ = output[0, -1, -3:, -3:] A__ = torch.tensor(_snake_case ).to(_snake_case ) assert torch_all_close(output_slice.flatten() , _snake_case , atol=5E-3 ) @unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' ) def _a ( self : Union[str, Any] ): """simple docstring""" A__ , A__ = self.prepare_init_args_and_inputs_for_common() A__ = self.block_class(**_snake_case ) model.to(_snake_case ) model.train() A__ = model(**_snake_case ) if isinstance(_snake_case , _snake_case ): A__ = output[0] A__ = torch.device(_snake_case ) A__ = randn_tensor(output.shape , device=_snake_case ) A__ = torch.nn.functional.mse_loss(_snake_case , _snake_case ) loss.backward()
9
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 2, 4], [1, 2, 3, 4]] __SCREAMING_SNAKE_CASE : Tuple = DisjunctiveConstraint(_A ) self.assertTrue(isinstance(dc.token_ids , _A ) ) with self.assertRaises(_A ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(_A ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(_A ): DisjunctiveConstraint(_A ) # fails here def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = [[1, 2, 3], [1, 2, 4]] __SCREAMING_SNAKE_CASE : Optional[Any] = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(1 ) __SCREAMING_SNAKE_CASE : int = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(2 ) __SCREAMING_SNAKE_CASE : Optional[Any] = stepped is True and completed is False and reset is False self.assertTrue(_A ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[str] = dc.update(3 ) __SCREAMING_SNAKE_CASE : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(_A ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] __SCREAMING_SNAKE_CASE : str = DisjunctiveConstraint(_A ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[int] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! 
self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
74
0
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys _lowerCAmelCase = "3" print("Python version:", sys.version) print("OS platform:", platform.platform()) print("OS architecture:", platform.machine()) try: import torch print("Torch version:", torch.__version__) print("Cuda available:", torch.cuda.is_available()) print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) except ImportError: print("Torch version:", None) try: import transformers print("transformers version:", transformers.__version__) except ImportError: print("transformers version:", None)
10
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowercase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowercase_ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default='''cifar10''' , metadata={'''help''': '''Name of a dataset from the datasets package'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the training data.'''} ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''A folder containing the validation data.'''} ) lowerCAmelCase_ = field( default=0.15 , metadata={'''help''': '''Percent to split off of train for validation.'''} ) lowerCAmelCase_ = field(default=32 , metadata={'''help''': '''The size of the square patches to use for masking.'''} ) lowerCAmelCase_ = field( default=0.6 , metadata={'''help''': '''Percentage of patches to mask.'''} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of training examples to this ''' '''value if set.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''For debugging purposes or quicker training, truncate the number of evaluation examples to this ''' '''value if set.''' ) } , ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = {} if self.train_dir is not None: __SCREAMING_SNAKE_CASE : Dict = self.train_dir if self.validation_dir is not None: __SCREAMING_SNAKE_CASE : Any = self.validation_dir __SCREAMING_SNAKE_CASE : List[Any] = data_files if data_files else None @dataclass class __UpperCamelCase : """simple docstring""" lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ''' '''checkpoint identifier on the hub. 
''' '''Don\'t set if you want to train a model from scratch.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(lowerCAmelCase__ )} , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'''} , ) lowerCAmelCase_ = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) lowerCAmelCase_ = field(default=lowerCAmelCase__ , metadata={'''help''': '''Name or path of preprocessor config.'''} ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each image. If not specified, will use `image_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={ '''help''': ( '''The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.''' ) } , ) lowerCAmelCase_ = field( default=lowerCAmelCase__ , metadata={'''help''': '''Stride to use for the encoder.'''} , ) class __UpperCamelCase : """simple docstring""" def __init__( self : Tuple , _A : Optional[int]=192 , _A : List[Any]=32 , _A : Optional[int]=4 , _A : str=0.6 ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = input_size __SCREAMING_SNAKE_CASE : List[str] = mask_patch_size __SCREAMING_SNAKE_CASE : Dict = model_patch_size __SCREAMING_SNAKE_CASE : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('''Input size must be divisible by mask patch size''' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('''Mask patch size must be divisible by model patch size''' ) __SCREAMING_SNAKE_CASE : Any = self.input_size // self.mask_patch_size __SCREAMING_SNAKE_CASE : Optional[Any] = self.mask_patch_size // self.model_patch_size __SCREAMING_SNAKE_CASE : int = self.rand_size**2 __SCREAMING_SNAKE_CASE : Optional[int] = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = np.random.permutation(self.token_count )[: self.mask_count] __SCREAMING_SNAKE_CASE : Union[str, Any] = np.zeros(self.token_count , dtype=_A ) __SCREAMING_SNAKE_CASE : Optional[int] = 1 __SCREAMING_SNAKE_CASE : List[str] = mask.reshape((self.rand_size, self.rand_size) ) __SCREAMING_SNAKE_CASE : List[Any] = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.stack([example['''pixel_values'''] for example in examples] ) __SCREAMING_SNAKE_CASE : Any = torch.stack([example['''mask'''] for example in examples] ) return {"pixel_values": pixel_values, 
"bool_masked_pos": mask} def a__ ( ): """simple docstring""" # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __SCREAMING_SNAKE_CASE : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('''run_mim''' , snake_case , snake_case ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(snake_case ) transformers.utils.logging.set_verbosity(snake_case ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. __SCREAMING_SNAKE_CASE : Tuple = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __SCREAMING_SNAKE_CASE : Optional[int] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' '''Use --overwrite_output_dir to overcome.''' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' ) # Initialize our dataset. __SCREAMING_SNAKE_CASE : Tuple = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. 
__SCREAMING_SNAKE_CASE : Any = None if '''validation''' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , snake_case ) and data_args.train_val_split > 0.0: __SCREAMING_SNAKE_CASE : List[str] = ds['''train'''].train_test_split(data_args.train_val_split ) __SCREAMING_SNAKE_CASE : int = split['''train'''] __SCREAMING_SNAKE_CASE : Dict = split['''test'''] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. __SCREAMING_SNAKE_CASE : List[Any] = { '''cache_dir''': model_args.cache_dir, '''revision''': model_args.model_revision, '''use_auth_token''': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __SCREAMING_SNAKE_CASE : str = AutoConfig.from_pretrained(model_args.config_name_or_path , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(snake_case , '''decoder_type''' ): __SCREAMING_SNAKE_CASE : Any = '''simmim''' # adapt config __SCREAMING_SNAKE_CASE : str = model_args.image_size if model_args.image_size is not None else config.image_size __SCREAMING_SNAKE_CASE : int = model_args.patch_size if model_args.patch_size is not None else config.patch_size __SCREAMING_SNAKE_CASE : str = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { '''image_size''': model_args.image_size, '''patch_size''': model_args.patch_size, '''encoder_stride''': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __SCREAMING_SNAKE_CASE : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case ) elif model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : List[Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case ) else: __SCREAMING_SNAKE_CASE : List[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __SCREAMING_SNAKE_CASE : str = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __SCREAMING_SNAKE_CASE : int = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=snake_case , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('''Training new model from scratch''' ) __SCREAMING_SNAKE_CASE : List[Any] = AutoModelForMaskedImageModeling.from_config(snake_case ) if training_args.do_train: __SCREAMING_SNAKE_CASE : Any = ds['''train'''].column_names else: __SCREAMING_SNAKE_CASE : int = ds['''validation'''].column_names if data_args.image_column_name is not None: __SCREAMING_SNAKE_CASE : List[Any] = data_args.image_column_name elif "image" in column_names: __SCREAMING_SNAKE_CASE : str = '''image''' elif "img" in column_names: 
__SCREAMING_SNAKE_CASE : List[str] = '''img''' else: __SCREAMING_SNAKE_CASE : Tuple = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __SCREAMING_SNAKE_CASE : Any = Compose( [ Lambda(lambda snake_case : img.convert('''RGB''' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __SCREAMING_SNAKE_CASE : str = MaskGenerator( input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(snake_case ): __SCREAMING_SNAKE_CASE : str = [transforms(snake_case ) for image in examples[image_column_name]] __SCREAMING_SNAKE_CASE : str = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('''--do_train requires a train dataset''' ) if data_args.max_train_samples is not None: __SCREAMING_SNAKE_CASE : Dict = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(snake_case ) if training_args.do_eval: if "validation" not in ds: raise ValueError('''--do_eval requires a validation dataset''' ) if data_args.max_eval_samples is not None: __SCREAMING_SNAKE_CASE : Union[str, Any] = ( ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(snake_case ) # Initialize our trainer __SCREAMING_SNAKE_CASE : List[str] = Trainer( model=snake_case , args=snake_case , train_dataset=ds['''train'''] if training_args.do_train else None , eval_dataset=ds['''validation'''] if training_args.do_eval else None , tokenizer=snake_case , data_collator=snake_case , ) # Training if training_args.do_train: __SCREAMING_SNAKE_CASE : Union[str, Any] = None if training_args.resume_from_checkpoint is not None: __SCREAMING_SNAKE_CASE : Tuple = training_args.resume_from_checkpoint elif last_checkpoint is not None: __SCREAMING_SNAKE_CASE : int = last_checkpoint __SCREAMING_SNAKE_CASE : Tuple = trainer.train(resume_from_checkpoint=snake_case ) trainer.save_model() trainer.log_metrics('''train''' , train_result.metrics ) trainer.save_metrics('''train''' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __SCREAMING_SNAKE_CASE : Union[str, Any] = trainer.evaluate() trainer.log_metrics('''eval''' , snake_case ) trainer.save_metrics('''eval''' , snake_case ) # Write model card and (optionally) push to hub __SCREAMING_SNAKE_CASE : Optional[Any] = { '''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''masked-image-modeling''', '''dataset''': data_args.dataset_name, '''tags''': ['''masked-image-modeling'''], } if training_args.push_to_hub: trainer.push_to_hub(**snake_case ) else: trainer.create_model_card(**snake_case ) if __name__ == "__main__": main()
74
0
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # Load the text-to-image unCLIP pipeline and reuse its components for image variation.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
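# Usage sketch for the conversion script above (the script filename is an assumption; the
# flags come from the argparse definitions): --dump_path is required, --txt2img_unclip
# defaults to "kakaobrain/karlo-v1-alpha".
#   python convert_unclip_txt2img_to_image_variation.py \
#       --dump_path ./unclip-image-variation \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha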
11
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { """facebook/data2vec-vision-base-ft""": ( """https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = '''data2vec-vision''' def __init__( self : Optional[int] , _A : List[Any]=768 , _A : Any=12 , _A : str=12 , _A : Union[str, Any]=3072 , _A : Union[str, Any]="gelu" , _A : List[Any]=0.0 , _A : Dict=0.0 , _A : Dict=0.02 , _A : Any=1e-12 , _A : Optional[Any]=224 , _A : Union[str, Any]=16 , _A : Tuple=3 , _A : List[Any]=False , _A : List[str]=False , _A : Dict=False , _A : Dict=False , _A : Any=0.1 , _A : List[str]=0.1 , _A : Dict=True , _A : Dict=[3, 5, 7, 11] , _A : Union[str, Any]=[1, 2, 3, 6] , _A : Optional[Any]=True , _A : Any=0.4 , _A : List[str]=256 , _A : Any=1 , _A : Any=False , _A : Union[str, Any]=255 , **_A : Tuple , ): """simple docstring""" super().__init__(**_A ) __SCREAMING_SNAKE_CASE : Any = hidden_size __SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Tuple = num_attention_heads __SCREAMING_SNAKE_CASE : List[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Optional[int] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = initializer_range __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps __SCREAMING_SNAKE_CASE : Any = image_size __SCREAMING_SNAKE_CASE : Optional[int] = patch_size __SCREAMING_SNAKE_CASE : Any = num_channels __SCREAMING_SNAKE_CASE : List[str] = use_mask_token __SCREAMING_SNAKE_CASE : List[Any] = use_absolute_position_embeddings __SCREAMING_SNAKE_CASE : Dict = use_relative_position_bias __SCREAMING_SNAKE_CASE : str = use_shared_relative_position_bias __SCREAMING_SNAKE_CASE : Union[str, Any] = layer_scale_init_value __SCREAMING_SNAKE_CASE : str = drop_path_rate __SCREAMING_SNAKE_CASE : Tuple = use_mean_pooling # decode head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : str = out_indices __SCREAMING_SNAKE_CASE : List[str] = pool_scales # auxiliary head attributes (semantic segmentation) __SCREAMING_SNAKE_CASE : Tuple = use_auxiliary_head __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_loss_weight __SCREAMING_SNAKE_CASE : Union[str, Any] = auxiliary_channels __SCREAMING_SNAKE_CASE : List[Any] = auxiliary_num_convs __SCREAMING_SNAKE_CASE : Optional[Any] = auxiliary_concat_input __SCREAMING_SNAKE_CASE : Any = semantic_loss_ignore_index class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" lowerCAmelCase_ = version.parse('''1.11''' ) @property def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return 1e-4
74
0
import os import sys import unittest lowerCamelCase__ : List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path lowerCamelCase__ : List[Any] = os.path.join(git_repo_path, """src""", """diffusers""") class _snake_case ( unittest.TestCase ): def lowercase__ ( self): '''simple docstring''' lowercase__ : Optional[Any] = find_backend(""" if not is_torch_available():""") self.assertEqual(SCREAMING_SNAKE_CASE_ , """torch""") # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") lowercase__ : int = find_backend(""" if not (is_torch_available() and is_transformers_available()):""") self.assertEqual(SCREAMING_SNAKE_CASE_ , """torch_and_transformers""") # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") lowercase__ : Tuple = find_backend( """ if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""") self.assertEqual(SCREAMING_SNAKE_CASE_ , """torch_and_transformers_and_onnx""") def lowercase__ ( self): '''simple docstring''' lowercase__ : int = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""" , SCREAMING_SNAKE_CASE_) self.assertIn("""torch_and_transformers""" , SCREAMING_SNAKE_CASE_) self.assertIn("""flax_and_transformers""" , SCREAMING_SNAKE_CASE_) self.assertIn("""torch_and_transformers_and_onnx""" , SCREAMING_SNAKE_CASE_) # Likewise, we can't assert on the exact content of a key self.assertIn("""UNet2DModel""" , objects["""torch"""]) self.assertIn("""FlaxUNet2DConditionModel""" , objects["""flax"""]) self.assertIn("""StableDiffusionPipeline""" , objects["""torch_and_transformers"""]) self.assertIn("""FlaxStableDiffusionPipeline""" , objects["""flax_and_transformers"""]) self.assertIn("""LMSDiscreteScheduler""" , objects["""torch_and_scipy"""]) self.assertIn("""OnnxStableDiffusionPipeline""" , objects["""torch_and_transformers_and_onnx"""]) def lowercase__ ( self): '''simple docstring''' lowercase__ : Union[str, Any] = create_dummy_object("""CONSTANT""" , """'torch'""") self.assertEqual(SCREAMING_SNAKE_CASE_ , """\nCONSTANT = None\n""") lowercase__ : Any = create_dummy_object("""function""" , """'torch'""") self.assertEqual( SCREAMING_SNAKE_CASE_ , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""") lowercase__ : Tuple = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ lowercase__ : List[str] = create_dummy_object("""FakeClass""" , """'torch'""") self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) def lowercase__ ( self): '''simple docstring''' lowercase__ : str = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ lowercase__ : List[Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]}) self.assertEqual(dummy_files["""torch"""] , SCREAMING_SNAKE_CASE_)
12
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[str] , _A : Optional[int] , _A : Optional[Any]=13 , _A : List[Any]=7 , _A : List[str]=True , _A : Dict=True , _A : Tuple=False , _A : Union[str, Any]=True , _A : List[str]=99 , _A : Union[str, Any]=32 , _A : str=5 , _A : Union[str, Any]=4 , _A : int=37 , _A : int="gelu" , _A : Tuple=0.1 , _A : Dict=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : List[Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : Optional[int]=4 , _A : Optional[int]=None , ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = parent __SCREAMING_SNAKE_CASE : Optional[int] = batch_size __SCREAMING_SNAKE_CASE : str = seq_length __SCREAMING_SNAKE_CASE : int = is_training __SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask __SCREAMING_SNAKE_CASE : str = use_token_type_ids __SCREAMING_SNAKE_CASE : Any = use_labels __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Optional[int] = hidden_size __SCREAMING_SNAKE_CASE : Any = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads __SCREAMING_SNAKE_CASE : List[str] = intermediate_size __SCREAMING_SNAKE_CASE : List[str] = hidden_act __SCREAMING_SNAKE_CASE : int = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size __SCREAMING_SNAKE_CASE : int = initializer_range __SCREAMING_SNAKE_CASE : List[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = num_choices __SCREAMING_SNAKE_CASE : Union[str, Any] = scope def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_input_mask: __SCREAMING_SNAKE_CASE : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) __SCREAMING_SNAKE_CASE : Any = None __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : int = None if self.use_labels: __SCREAMING_SNAKE_CASE : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.num_choices ) __SCREAMING_SNAKE_CASE : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCAmelCase__ ( self : Optional[int] ): """simple docstring""" return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , 
hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : Optional[int] , _A : int , _A : Union[str, Any] , _A : List[str] , _A : Dict , _A : Dict , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , _A ) __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCAmelCase__ ( self : Tuple , _A : Dict , _A : Tuple , _A : str , _A : Optional[int] , _A : List[str] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForMaskedLM(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Tuple = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : str , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForQuestionAnswering(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : int = model( _A , attention_mask=_A , start_positions=_A , end_positions=_A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCAmelCase__ ( self : Dict , _A : List[str] , _A : Tuple , _A : str , _A : Tuple , _A : Optional[int] , _A : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels __SCREAMING_SNAKE_CASE : List[Any] = DistilBertForSequenceClassification(_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : List[str] , _A : int , _A : List[Any] , _A : Any , _A : Any , _A : str , _A : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = DistilBertForTokenClassification(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : Dict = model(_A , attention_mask=_A , labels=_A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCAmelCase__ ( self : Dict , _A : Optional[int] , _A : int , _A : Optional[int] , _A : List[Any] , _A : int , _A : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self.num_choices __SCREAMING_SNAKE_CASE : int = DistilBertForMultipleChoice(config=_A ) model.to(_A ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __SCREAMING_SNAKE_CASE : Optional[Any] = model( _A , attention_mask=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() ((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE), 
(__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : List[Any] = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): """simple docstring""" lowerCAmelCase_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCAmelCase_ = ( { '''feature-extraction''': DistilBertModel, '''fill-mask''': DistilBertForMaskedLM, '''question-answering''': DistilBertForQuestionAnswering, '''text-classification''': DistilBertForSequenceClassification, '''token-classification''': DistilBertForTokenClassification, '''zero-shot''': DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True lowerCAmelCase_ = True def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = DistilBertModelTester(self ) __SCREAMING_SNAKE_CASE : List[str] = ConfigTester(self , config_class=_A , dim=37 ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*_A ) def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*_A ) def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*_A ) def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*_A ) @slow def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained(_A ) self.assertIsNotNone(_A ) @slow @require_torch_gpu def UpperCAmelCase__ ( self : str ): """simple docstring""" __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == DistilBertForMultipleChoice: return __SCREAMING_SNAKE_CASE : Dict = True __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(config=_A ) __SCREAMING_SNAKE_CASE : int = self._prepare_for_class(_A , _A ) __SCREAMING_SNAKE_CASE : List[Any] = torch.jit.trace( _A , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(_A , os.path.join(_A , '''traced_model.pt''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = torch.jit.load(os.path.join(_A , '''traced_model.pt''' ) , map_location=_A ) loaded(inputs_dict['''input_ids'''].to(_A ) , inputs_dict['''attention_mask'''].to(_A ) ) @require_torch class __UpperCamelCase ( unittest.TestCase ): """simple docstring""" @slow def UpperCAmelCase__ ( self : Dict ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = DistilBertModel.from_pretrained('''distilbert-base-uncased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Union[str, Any] = model(_A , attention_mask=_A )[0] __SCREAMING_SNAKE_CASE : Optional[Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , _A ) __SCREAMING_SNAKE_CASE : Any = torch.tensor( [[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4 ) )
74
0
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """BERT-style configuration with additional movement-pruning parameters."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
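# Usage sketch (keyword names come from the constructor above; the values shown are just
# the defaults): the pruning-specific settings can be overridden at construction time, e.g.
#   config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)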
13
import logging import os import threading import time try: import warnings except ImportError: lowercase_ = None try: import msvcrt except ImportError: lowercase_ = None try: import fcntl except ImportError: lowercase_ = None # Backward compatibility # ------------------------------------------------ try: TimeoutError except NameError: lowercase_ = OSError # Data # ------------------------------------------------ lowercase_ = [ """Timeout""", """BaseFileLock""", """WindowsFileLock""", """UnixFileLock""", """SoftFileLock""", """FileLock""", ] lowercase_ = """3.0.12""" lowercase_ = None def a__ ( ): """simple docstring""" global _logger __SCREAMING_SNAKE_CASE : Optional[Any] = _logger or logging.getLogger(__name__ ) return _logger class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = lock_file return None def __str__( self : Optional[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = F'''The file lock \'{self.lock_file}\' could not be acquired.''' return temp class __UpperCamelCase : """simple docstring""" def __init__( self : Optional[Any] , _A : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = lock return None def __enter__( self : Any ): """simple docstring""" return self.lock def __exit__( self : str , _A : Any , _A : int , _A : Any ): """simple docstring""" self.lock.release() return None class __UpperCamelCase : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[int]=-1 , _A : List[Any]=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = max_filename_length if max_filename_length is not None else 255 # Hash the filename if it's too long __SCREAMING_SNAKE_CASE : Optional[Any] = self.hash_filename_if_too_long(_A , _A ) # The path to the lock file. __SCREAMING_SNAKE_CASE : Tuple = lock_file # The file descriptor for the *_lock_file* as it is returned by the # os.open() function. # This file lock is only NOT None, if the object currently holds the # lock. __SCREAMING_SNAKE_CASE : str = None # The default timeout value. __SCREAMING_SNAKE_CASE : Any = timeout # We use this lock primarily for the lock counter. __SCREAMING_SNAKE_CASE : int = threading.Lock() # The lock counter is used for implementing the nested locking # mechanism. Whenever the lock is acquired, the counter is increased and # the lock is only released, when this value is 0 again. __SCREAMING_SNAKE_CASE : int = 0 return None @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._timeout @timeout.setter def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = float(_A ) return None def UpperCAmelCase__ ( self : Optional[Any] ): """simple docstring""" raise NotImplementedError() def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" raise NotImplementedError() @property def UpperCAmelCase__ ( self : List[str] ): """simple docstring""" return self._lock_file_fd is not None def UpperCAmelCase__ ( self : Tuple , _A : List[Any]=None , _A : Optional[Any]=0.05 ): """simple docstring""" if timeout is None: __SCREAMING_SNAKE_CASE : Optional[int] = self.timeout # Increment the number right at the beginning. # We can still undo it, if something fails. 
with self._thread_lock: self._lock_counter += 1 __SCREAMING_SNAKE_CASE : Tuple = id(self ) __SCREAMING_SNAKE_CASE : Any = self._lock_file __SCREAMING_SNAKE_CASE : Union[str, Any] = time.time() try: while True: with self._thread_lock: if not self.is_locked: logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' ) self._acquire() if self.is_locked: logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' ) break elif timeout >= 0 and time.time() - start_time > timeout: logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' ) raise Timeout(self._lock_file ) else: logger().debug( F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' ) time.sleep(_A ) except: # noqa # Something did go wrong, so decrement the counter. with self._thread_lock: __SCREAMING_SNAKE_CASE : Optional[Any] = max(0 , self._lock_counter - 1 ) raise return _Acquire_ReturnProxy(lock=self ) def UpperCAmelCase__ ( self : int , _A : List[str]=False ): """simple docstring""" with self._thread_lock: if self.is_locked: self._lock_counter -= 1 if self._lock_counter == 0 or force: __SCREAMING_SNAKE_CASE : Optional[int] = id(self ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self._lock_file logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' ) self._release() __SCREAMING_SNAKE_CASE : int = 0 logger().debug(F'''Lock {lock_id} released on {lock_filename}''' ) return None def __enter__( self : int ): """simple docstring""" self.acquire() return self def __exit__( self : Optional[int] , _A : List[str] , _A : List[Any] , _A : int ): """simple docstring""" self.release() return None def __del__( self : int ): """simple docstring""" self.release(force=_A ) return None def UpperCAmelCase__ ( self : Optional[int] , _A : str , _A : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = os.path.basename(_A ) if len(_A ) > max_length and max_length > 0: __SCREAMING_SNAKE_CASE : Tuple = os.path.dirname(_A ) __SCREAMING_SNAKE_CASE : Optional[int] = str(hash(_A ) ) __SCREAMING_SNAKE_CASE : Optional[int] = filename[: max_length - len(_A ) - 8] + '''...''' + hashed_filename + '''.lock''' return os.path.join(_A , _A ) else: return path class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : List[Any] , _A : Optional[Any] , _A : List[Any]=-1 , _A : Dict=None ): """simple docstring""" from .file_utils import relative_to_absolute_path super().__init__(_A , timeout=_A , max_filename_length=_A ) __SCREAMING_SNAKE_CASE : str = '''\\\\?\\''' + relative_to_absolute_path(self.lock_file ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : List[str] = os.open(self._lock_file , _A ) except OSError: pass else: try: msvcrt.locking(_A , msvcrt.LK_NBLCK , 1 ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : str = fd return None def UpperCAmelCase__ ( self : int ): """simple docstring""" __SCREAMING_SNAKE_CASE : Any = self._lock_file_fd __SCREAMING_SNAKE_CASE : int = None msvcrt.locking(_A , msvcrt.LK_UNLCK , 1 ) os.close(_A ) try: os.remove(self._lock_file ) # Probably another instance of the application # that acquired the file lock. 
except OSError: pass return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Tuple , _A : Optional[int] , _A : Dict=-1 , _A : str=None ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = os.statvfs(os.path.dirname(_A ) ).f_namemax super().__init__(_A , timeout=_A , max_filename_length=_A ) def UpperCAmelCase__ ( self : List[Any] ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC __SCREAMING_SNAKE_CASE : int = os.open(self._lock_file , _A ) try: fcntl.flock(_A , fcntl.LOCK_EX | fcntl.LOCK_NB ) except OSError: os.close(_A ) else: __SCREAMING_SNAKE_CASE : int = fd return None def UpperCAmelCase__ ( self : Any ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[Any] = self._lock_file_fd __SCREAMING_SNAKE_CASE : Any = None fcntl.flock(_A , fcntl.LOCK_UN ) os.close(_A ) return None class __UpperCamelCase ( lowerCAmelCase__ ): """simple docstring""" def UpperCAmelCase__ ( self : Tuple ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: __SCREAMING_SNAKE_CASE : Optional[Any] = os.open(self._lock_file , _A ) except OSError: pass else: __SCREAMING_SNAKE_CASE : List[str] = fd return None def UpperCAmelCase__ ( self : Union[str, Any] ): """simple docstring""" os.close(self._lock_file_fd ) __SCREAMING_SNAKE_CASE : Optional[Any] = None try: os.remove(self._lock_file ) # The file is already deleted and that's what we want. except OSError: pass return None lowercase_ = None if msvcrt: lowercase_ = WindowsFileLock elif fcntl: lowercase_ = UnixFileLock else: lowercase_ = SoftFileLock if warnings is not None: warnings.warn("""only soft file lock is available""")
74
0
import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel a__ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } a__ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def __UpperCAmelCase ( __a : Any ,__a : List[str]=False ) -> List[Any]: """simple docstring""" _a , _a : List[Any] = create_model( '''HTSAT-tiny''' ,'''roberta''' ,__a ,precision='''fp32''' ,device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' ,enable_fusion=__a ,fusion_type='''aff_2d''' if enable_fusion else None ,) return model, model_cfg def __UpperCAmelCase ( __a : str ) -> Union[str, Any]: """simple docstring""" _a : Any = {} _a : int = R'''.*sequential.(\d+).*''' _a : List[Any] = R'''.*_projection.(\d+).*''' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _a : List[Any] = key.replace(__a ,__a ) if re.match(__a ,__a ): # replace sequential layers with list _a : int = re.match(__a ,__a ).group(1 ) _a : List[str] = key.replace(F"""sequential.{sequential_layer}.""" ,F"""layers.{int(__a )//3}.linear.""" ) elif re.match(__a ,__a ): _a : Optional[Any] = int(re.match(__a ,__a ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... _a : Optional[int] = 1 if projecton_layer == 0 else 2 _a : List[str] = key.replace(F"""_projection.{projecton_layer}.""" ,F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _a : List[str] = value _a : Union[str, Any] = mixed_qkv.size(0 ) // 3 _a : Tuple = mixed_qkv[:qkv_dim] _a : Dict = mixed_qkv[qkv_dim : qkv_dim * 2] _a : Dict = mixed_qkv[qkv_dim * 2 :] _a : Optional[int] = query_layer _a : Tuple = key_layer _a : Tuple = value_layer else: _a : Tuple = value return model_state_dict def __UpperCAmelCase ( __a : Tuple ,__a : Optional[int] ,__a : Dict ,__a : Dict=False ) -> Any: """simple docstring""" _a , _a : Optional[Any] = init_clap(__a ,enable_fusion=__a ) clap_model.eval() _a : Tuple = clap_model.state_dict() _a : Union[str, Any] = rename_state_dict(__a ) _a : Union[str, Any] = ClapConfig() _a : Dict = enable_fusion _a : Dict = ClapModel(__a ) # ignore the spectrogram embedding layer model.load_state_dict(__a ,strict=__a ) model.save_pretrained(__a ) transformers_config.save_pretrained(__a ) if __name__ == "__main__": a__ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') a__ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
14
import html

from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends

if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup

logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
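A quick sanity check of the xpath construction, as a sketch: the HTML snippet is invented, and it assumes the extractor is importable from transformers with bs4 installed. Sibling tags of the same name get 1-based subscripts, while singletons get none.

from transformers import MarkupLMFeatureExtractor

html_string = "<html><body><p>Hello</p><p>World</p></body></html>"

feature_extractor = MarkupLMFeatureExtractor()
encoding = feature_extractor(html_string)

# Each text node gets an absolute xpath; the two <p> siblings are disambiguated.
print(encoding["nodes"])   # expected: [['Hello', 'World']]
print(encoding["xpaths"])  # expected: [['/html/body/p[1]', '/html/body/p[2]']]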
import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) A : str = [ 'cross_validation.py', 'gradient_accumulation.py', 'local_sgd.py', 'multi_process_metrics.py', 'memory.py', 'automatic_gradient_accumulation.py', 'fsdp_with_peak_mem_tracking.py', 'deepspeed_with_config_support.py', 'megatron_lm_gpt_pretraining.py', ] class A ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase__ (self : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : bool , _UpperCAmelCase : str = None , _UpperCAmelCase : list = None ) -> List[Any]: """simple docstring""" lowercase__ = None lowercase__ = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) ) lowercase__ = os.path.abspath("""examples""" ) for item in os.listdir(_UpperCAmelCase ): if item not in EXCLUDE_EXAMPLES: lowercase__ = os.path.join(_UpperCAmelCase , _UpperCAmelCase ) if os.path.isfile(_UpperCAmelCase ) and ".py" in item_path: with self.subTest( tested_script=_UpperCAmelCase , feature_script=_UpperCAmelCase , tested_section="""main()""" if parser_only else """training_function()""" , ): lowercase__ = compare_against_test( os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowercase__ = """\n""".join(_UpperCAmelCase ) if special_strings is not None: for string in special_strings: lowercase__ = diff.replace(_UpperCAmelCase , """""" ) self.assertEqual(_UpperCAmelCase , """""" ) def lowerCamelCase__ (self : Optional[int] ) -> int: """simple docstring""" self.one_complete_example("""complete_nlp_example.py""" , _UpperCAmelCase ) self.one_complete_example("""complete_nlp_example.py""" , _UpperCAmelCase ) def lowerCamelCase__ (self : Tuple ) -> Dict: """simple docstring""" lowercase__ = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) ) lowercase__ = [ """ """ * 16 + """{\n\n""", """ """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""", """ """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""", """ """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""", """ """ * 20 + """\"epoch\": epoch,\n\n""", """ """ * 16 + """},\n\n""", """ """ * 16 + """step=epoch,\n""", """ """ * 12, """ """ * 8 + """for step, batch in enumerate(active_dataloader):\n""", ] self.one_complete_example("""complete_cv_example.py""" , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) self.one_complete_example("""complete_cv_example.py""" , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = False @classmethod def lowerCamelCase__ (cls : Tuple ) -> int: """simple docstring""" super().setUpClass() lowercase__ = tempfile.mkdtemp() lowercase__ = os.path.join(cls._tmpdir , """default_config.yml""" ) write_basic_config(save_location=cls.configPath ) lowercase__ = ["""accelerate""", """launch""", """--config_file""", cls.configPath] @classmethod def lowerCamelCase__ (cls : List[Any] ) -> int: """simple docstring""" super().tearDownClass() shutil.rmtree(cls._tmpdir ) def lowerCamelCase__ 
(self : str ) -> Optional[int]: """simple docstring""" lowercase__ = f''' examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) ) def lowerCamelCase__ (self : List[Any] ) -> Dict: """simple docstring""" lowercase__ = f''' examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} '''.split() lowercase__ = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) ) def lowerCamelCase__ (self : Dict ) -> Union[str, Any]: """simple docstring""" lowercase__ = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )} '''.split() lowercase__ = run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase ) self.assertNotIn("""epoch 0:""" , _UpperCAmelCase ) self.assertIn("""epoch 1:""" , _UpperCAmelCase ) def lowerCamelCase__ (self : List[Any] ) -> Tuple: """simple docstring""" lowercase__ = f''' examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )} '''.split() lowercase__ = run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase ) if torch.cuda.is_available(): lowercase__ = torch.cuda.device_count() else: lowercase__ = 1 if num_processes > 1: self.assertNotIn("""epoch 0:""" , _UpperCAmelCase ) self.assertIn("""epoch 1:""" , _UpperCAmelCase ) else: self.assertIn("""epoch 0:""" , _UpperCAmelCase ) self.assertIn("""epoch 1:""" , _UpperCAmelCase ) @slow def lowerCamelCase__ (self : Union[str, Any] ) -> Any: """simple docstring""" lowercase__ = """ examples/by_feature/cross_validation.py --num_folds 2 """.split() with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ): lowercase__ = run_command(self._launch_args + testargs , return_stdout=_UpperCAmelCase ) lowercase__ = re.findall("""({.+})""" , _UpperCAmelCase ) lowercase__ = [r for r in results if """accuracy""" in r][-1] lowercase__ = ast.literal_eval(_UpperCAmelCase ) self.assertGreaterEqual(results["""accuracy"""] , 0.75 ) def lowerCamelCase__ (self : List[Any] ) -> int: """simple docstring""" lowercase__ = ["""examples/by_feature/multi_process_metrics.py"""] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} ) def lowerCamelCase__ (self : List[str] ) -> str: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdir: lowercase__ = f''' examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} '''.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(_UpperCAmelCase , """tracking""" ) ) ) def lowerCamelCase__ (self : List[Any] ) -> Optional[int]: """simple docstring""" lowercase__ = ["""examples/by_feature/gradient_accumulation.py"""] run_command(self._launch_args + testargs ) def lowerCamelCase__ (self : List[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ = ["""examples/by_feature/local_sgd.py"""] run_command(self._launch_args + testargs )
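Stripped to its core, the pattern these tests rely on is "launch an example script into a temp dir, then assert on the files it leaves behind". A minimal sketch of that idea follows; the script path and flags are placeholders, not the real accelerate examples, and it shells out directly instead of using the repo's run_command helper.

import os
import subprocess
import tempfile

def run_and_check(script: str, extra_args: list[str]) -> None:
    with tempfile.TemporaryDirectory() as tmpdir:
        cmd = ["accelerate", "launch", script, "--output_dir", tmpdir] + extra_args
        subprocess.run(cmd, check=True)  # raises if the example crashes
        # the example is expected to write at least one checkpoint folder
        assert any(name.startswith("epoch_") for name in os.listdir(tmpdir))

# run_and_check("examples/by_feature/checkpointing.py", ["--checkpointing_steps", "epoch"])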
import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase_ = logging.get_logger() def a__ ( snake_case , snake_case , snake_case , snake_case , snake_case = True ): """simple docstring""" print(F'''Converting {name}...''' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": __SCREAMING_SNAKE_CASE : Tuple = timm.create_model('''levit_128s''' , pretrained=snake_case ) else: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_128''' , pretrained=snake_case ) if hidden_sizes == 192: __SCREAMING_SNAKE_CASE : Dict = timm.create_model('''levit_192''' , pretrained=snake_case ) if hidden_sizes == 256: __SCREAMING_SNAKE_CASE : Optional[int] = timm.create_model('''levit_256''' , pretrained=snake_case ) if hidden_sizes == 384: __SCREAMING_SNAKE_CASE : Any = timm.create_model('''levit_384''' , pretrained=snake_case ) from_model.eval() __SCREAMING_SNAKE_CASE : str = LevitForImageClassificationWithTeacher(snake_case ).eval() __SCREAMING_SNAKE_CASE : int = OrderedDict() __SCREAMING_SNAKE_CASE : List[Any] = from_model.state_dict() __SCREAMING_SNAKE_CASE : Tuple = list(from_model.state_dict().keys() ) __SCREAMING_SNAKE_CASE : str = list(our_model.state_dict().keys() ) print(len(snake_case ) , len(snake_case ) ) for i in range(len(snake_case ) ): __SCREAMING_SNAKE_CASE : int = weights[og_keys[i]] our_model.load_state_dict(snake_case ) __SCREAMING_SNAKE_CASE : str = torch.randn((2, 3, 224, 224) ) __SCREAMING_SNAKE_CASE : Tuple = from_model(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = our_model(snake_case ).logits assert torch.allclose(snake_case , snake_case ), "The model logits don't match the original one." 
__SCREAMING_SNAKE_CASE : Union[str, Any] = name print(snake_case ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) __SCREAMING_SNAKE_CASE : Union[str, Any] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'''Pushed {checkpoint_name}''' ) def a__ ( snake_case , snake_case = None , snake_case = True ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = '''imagenet-1k-id2label.json''' __SCREAMING_SNAKE_CASE : int = 1_000 __SCREAMING_SNAKE_CASE : Optional[int] = (1, num_labels) __SCREAMING_SNAKE_CASE : Any = '''huggingface/label-files''' __SCREAMING_SNAKE_CASE : Optional[Any] = num_labels __SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='''dataset''' ) , '''r''' ) ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {int(snake_case ): v for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : str = idalabel __SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in idalabel.items()} __SCREAMING_SNAKE_CASE : List[str] = partial(snake_case , num_labels=snake_case , idalabel=snake_case , labelaid=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''levit-128S''': 128, '''levit-128''': 128, '''levit-192''': 192, '''levit-256''': 256, '''levit-384''': 384, } __SCREAMING_SNAKE_CASE : Optional[int] = { '''levit-128S''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-128''': ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), '''levit-192''': ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-256''': ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), '''levit-384''': ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , snake_case , names_to_config[model_name] , snake_case , snake_case ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , snake_case , snake_case , snake_case , snake_case ) return config, expected_shape if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help="""The name of the model you wish to convert, it must be one of the supported Levit* architecture,""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""levit-dump-folder/""", type=Path, required=False, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") parser.add_argument( """--no-push_to_hub""", dest="""push_to_hub""", action="""store_false""", help="""Do not push model and image processor to the hub""", ) lowercase_ = parser.parse_args() lowercase_ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
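The conversion above maps weights purely by position: it zips the timm state dict keys against the HF model's keys in order. That only works when both modules register parameters in the same order, which is why the final logits comparison matters. A toy version of the same idea, using two stand-in models rather than LeViT:

import torch
from torch import nn

src = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))
dst = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 2))

# positional remap: i-th source tensor goes to the i-th destination key
remapped = {
    dst_key: src_value
    for dst_key, src_value in zip(dst.state_dict().keys(), src.state_dict().values())
}
dst.load_state_dict(remapped)

x = torch.randn(1, 4)
assert torch.allclose(src(x), dst(x))  # outputs must match after the copy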
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __A : Any = 'bart' __A : str = True @st.cache(allow_output_mutation=A__ ) def __a ( ): if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" ) SCREAMING_SNAKE_CASE = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" ) SCREAMING_SNAKE_CASE = qar_model.eval() else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (None, None) if MODEL_TYPE == "bart": SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("yjernite/bart_eli5" ) SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" ) SCREAMING_SNAKE_CASE = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" ) sas_model.load_state_dict(save_dict["model"] ) SCREAMING_SNAKE_CASE = sas_model.eval() else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = make_qa_sas_model( model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=A__ ) def __a ( ): if LOAD_DENSE_INDEX: SCREAMING_SNAKE_CASE = faiss.StandardGpuResources() SCREAMING_SNAKE_CASE = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"] SCREAMING_SNAKE_CASE = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , ) SCREAMING_SNAKE_CASE = faiss.IndexFlatIP(128 ) SCREAMING_SNAKE_CASE = faiss.index_cpu_to_gpu(A__ , 1 , A__ ) wikiaab_gpu_index_flat.add(A__ ) # TODO fix for larger GPU else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (None, None) SCREAMING_SNAKE_CASE = Elasticsearch([{"host": "localhost", "port": "9200"}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=A__ ) def __a ( ): SCREAMING_SNAKE_CASE = datasets.load_dataset("eli5" , name="LFQA_reddit" ) SCREAMING_SNAKE_CASE = elia["train_eli5"] SCREAMING_SNAKE_CASE = np.memmap( "eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) ) SCREAMING_SNAKE_CASE = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(A__ ) return (elia_train, eli5_train_q_index) __A , __A , __A : Dict = load_indexes() __A , __A , __A , __A : Union[str, Any] = load_models() __A , __A : Union[str, Any] = load_train_data() def __a ( A__ : Any , A__ : List[str]=10 ): SCREAMING_SNAKE_CASE = embed_questions_for_retrieval([question] , A__ , A__ ) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = eli5_train_q_index.search(A__ , A__ ) SCREAMING_SNAKE_CASE = [elia_train[int(A__ )] for i in I[0]] return nn_examples def __a ( A__ : Optional[int] , A__ : Union[str, Any]="wiki40b" , A__ : List[str]="dense" , A__ : int=10 ): if source == "none": SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (" <P> ".join(["" for _ in range(11 )] ).strip(), []) else: if method == "dense": SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = query_qa_dense_index( A__ , A__ , A__ , A__ , A__ , A__ ) else: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = query_es_index( A__ , A__ , index_name="english_wiki40b_snippets_100w" , n_results=A__ , ) SCREAMING_SNAKE_CASE = [ (res["article_title"], res["section_title"].strip(), res["score"], 
res["passage_text"]) for res in hit_lst ] SCREAMING_SNAKE_CASE = "question: {} context: {}".format(A__ , A__ ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda A__ : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda A__ : None), } ) def __a ( A__ : List[str] , A__ : Tuple , A__ : Dict , A__ : Optional[int]=64 , A__ : Tuple=256 , A__ : Dict=False , A__ : Optional[Any]=2 , A__ : Optional[int]=0.9_5 , A__ : Any=0.8 ): with torch.no_grad(): SCREAMING_SNAKE_CASE = qa_sas_generate( A__ , A__ , A__ , num_answers=1 , num_beams=A__ , min_len=A__ , max_len=A__ , do_sample=A__ , temp=A__ , top_p=A__ , top_k=A__ , max_input_length=1024 , device="cuda:0" , )[0] return (answer, support_list) st.title('Long Form Question Answering with ELI5') # Start sidebar __A : int = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>' __A : Tuple = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __A : int = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n' st.sidebar.markdown(description, unsafe_allow_html=True) __A : List[Any] = [ 'Answer the question', 'View the retrieved document only', 'View the most similar ELI5 question and answer', 'Show me everything, please!', ] __A : Dict = st.sidebar.checkbox('Demo options') if demo_options: __A : List[Any] = st.sidebar.selectbox( '', action_list, index=3, ) __A : Optional[int] = action_list.index(action_st) __A : Optional[Any] = st.sidebar.selectbox( '', ['Show full text of passages', 'Show passage section titles'], index=0, ) __A : List[Any] = show_type == 'Show full text of passages' else: __A : Tuple = 3 __A : List[str] = True __A : Tuple = st.sidebar.checkbox('Retrieval options') if retrieval_options: __A : str = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n ' st.sidebar.markdown(retriever_info) __A : List[str] = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none']) __A : Union[str, Any] = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed']) else: __A : str = 'wiki40b' __A : str = 'dense' __A : Optional[Any] = 'beam' __A : int = 2 __A : Dict = 6_4 __A : Optional[Any] = 2_5_6 __A : Dict = None __A : Optional[Any] = None __A : Optional[int] = st.sidebar.checkbox('Generation options') if generate_options: __A : Dict = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. 
You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n ' st.sidebar.markdown(generate_info) __A : Optional[Any] = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled']) __A : Union[str, Any] = st.sidebar.slider( 'Minimum generation length', min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None ) __A : Any = st.sidebar.slider( 'Maximum generation length', min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None ) if sampled == "beam": __A : Optional[int] = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __A : Any = st.sidebar.slider( 'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) __A : Optional[int] = st.sidebar.slider( 'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) __A : int = None # start main text __A : Optional[int] = [ '<MY QUESTION>', 'How do people make chocolate?', 'Why do we get a fever when we are sick?', 'How can different animals perceive different colors?', 'What is natural language processing?', 'What\'s the best way to treat a sunburn?', 'What exactly are vitamins ?', 'How does nuclear energy provide electricity?', 'What\'s the difference between viruses and bacteria?', 'Why are flutes classified as woodwinds when most of them are made out of metal ?', 'Why do people like drinking coffee even though it tastes so bad?', 'What happens when wine ages? How does it make the wine taste better?', 'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?', 'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?', 'How does New Zealand have so many large bird predators?', ] __A : Any = st.selectbox( 'What would you like to ask? 
---- select <MY QUESTION> to enter a new query', questions_list, index=1, ) if question_s == "<MY QUESTION>": __A : List[Any] = st.text_input('Enter your question here:', '') else: __A : List[Any] = question_s if st.button('Show me!'): if action in [0, 1, 3]: if index_type == "mixed": __A , __A : str = make_support(question, source=wiki_source, method='dense', n_results=1_0) __A , __A : List[str] = make_support(question, source=wiki_source, method='sparse', n_results=1_0) __A : Optional[Any] = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __A : Union[str, Any] = support_list[:1_0] __A : List[str] = '<P> ' + ' <P> '.join([res[-1] for res in support_list]) else: __A , __A : List[Any] = make_support(question, source=wiki_source, method=index_type, n_results=1_0) if action in [0, 3]: __A , __A : Tuple = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == 'sampled'), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('### The model generated answer is:') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:') for i, res in enumerate(support_list): __A : Optional[int] = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(' ', '_')) __A : List[Any] = res[1].strip() if sec_titles == "": __A : List[str] = '[{}]({})'.format(res[0], wiki_url) else: __A : List[Any] = sec_titles.split(' & ') __A : Union[str, Any] = ' & '.join( ['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list] ) st.markdown( '{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + '</span>', unsafe_allow_html=True ) if action in [2, 3]: __A : Optional[int] = find_nearest_training(question) __A : List[Any] = nn_train_list[0] st.markdown( '--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title']) ) __A : Any = [ '{}. {}'.format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != ''])) for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score'])) if i == 0 or sc > 2 ] st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st))) __A : Optional[int] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
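The dense retrieval path in the app boils down to max-inner-product search over precomputed passage embeddings. A self-contained sketch of that step with random vectors (faiss is required; the corpus size and dimension are arbitrary stand-ins for the wiki40b reps):

import faiss
import numpy as np

dim = 128
passages = np.random.rand(1000, dim).astype("float32")  # stand-in for passage embeddings

index = faiss.IndexFlatIP(dim)  # inner-product index, as used above
index.add(passages)

query = np.random.rand(1, dim).astype("float32")
scores, ids = index.search(query, 10)  # top-10 passage ids by inner product
print(ids[0])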
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
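The point of `_LazyModule` is to defer the heavy torch imports until an attribute is actually touched. A stripped-down sketch of the same mechanism using PEP 562 module-level `__getattr__`; this is the idea, not the transformers implementation, and the mapping to the stdlib math module is purely illustrative:

import importlib

_import_structure = {"math": ["sqrt"]}  # symbol name -> module that provides it

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(module_name)  # imported on first access only
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# placed in a package __init__.py, `from pkg import sqrt` would trigger the
# import of `math` lazily, at lookup time rather than at package import time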
from __future__ import annotations

from random import choice


def random_pivot(lst: list[int]) -> int:
    """Choose a random element of the list as the pivot."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element (1-indexed) of a list of distinct
    integers, using randomized quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
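Since the partition keeps only elements strictly smaller or strictly larger than the pivot, duplicates of the pivot are silently dropped, so the function is only reliable on lists of distinct values. A quick property check against sorting makes that contract explicit; this sketch assumes `kth_number` from above is in scope:

from random import sample

values = sample(range(100), 15)  # 15 distinct integers
for k in range(1, len(values) + 1):
    assert kth_number(values, k) == sorted(values)[k - 1]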
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging lowercase_ = logging.get_logger(__name__) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[int] = set() __SCREAMING_SNAKE_CASE : str = [] def parse_line(snake_case ): for line in fp: if isinstance(snake_case , snake_case ): __SCREAMING_SNAKE_CASE : List[Any] = line.decode('''UTF-8''' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(''' ''' ): # process a single warning and move it to `selected_warnings`. if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : List[Any] = '''\n'''.join(snake_case ) # Only keep the warnings specified in `targets` if any(F''': {x}: ''' in warning for x in targets ): selected_warnings.add(snake_case ) buffer.clear() continue else: __SCREAMING_SNAKE_CASE : int = line.strip() buffer.append(snake_case ) if from_gh: for filename in os.listdir(snake_case ): __SCREAMING_SNAKE_CASE : Any = os.path.join(snake_case , snake_case ) if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with open(snake_case ) as fp: parse_line(snake_case ) else: try: with zipfile.ZipFile(snake_case ) as z: for filename in z.namelist(): if not os.path.isdir(snake_case ): # read the file if filename != "warnings.txt": continue with z.open(snake_case ) as fp: parse_line(snake_case ) except Exception: logger.warning( F'''{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.''' ) return selected_warnings def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : List[str] = set() __SCREAMING_SNAKE_CASE : List[Any] = [os.path.join(snake_case , snake_case ) for p in os.listdir(snake_case ) if (p.endswith('''.zip''' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(snake_case , snake_case ) ) return selected_warnings if __name__ == "__main__": def a__ ( snake_case ): """simple docstring""" return values.split(''',''' ) lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) lowercase_ = parser.parse_args() lowercase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 
80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts lowercase_ = extract_warnings(args.output_dir, args.targets) lowercase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
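The parsing trick in `parse_line` is to buffer indented continuation lines until the next non-indented line, at which point the buffer holds one complete warning. Isolated on a fabricated pytest-style log excerpt (a sketch of the grouping only, without the target filtering):

lines = [
    "tests/test_x.py::test_a",
    "  /usr/lib/python3/foo.py:1: DeprecationWarning: old API",
    "    warnings.warn('old API')",
    "tests/test_x.py::test_b",
]

warnings_found, buffer = [], []
for line in lines:
    if not line.startswith(" "):  # a non-indented line closes the current warning
        if buffer:
            warnings_found.append("\n".join(buffer))
            buffer.clear()
    else:
        buffer.append(line.strip())
if buffer:  # flush whatever is left at end of file
    warnings_found.append("\n".join(buffer))

print(warnings_found)  # one entry per indented block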
'''simple docstring''' import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class lowerCAmelCase_ ( unittest.TestCase ): def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = tokenizer.decode(greedy_ids[0] ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> List[str]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase ) _lowerCAmelCase = greedy_ids[:, input_ids.shape[1] :] _lowerCAmelCase = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_prompt=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=10 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _lowerCAmelCase = cs.out[:-1] self.assertEqual(_lowerCAmelCase , _lowerCAmelCase ) def _snake_case ( self ) -> Dict: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _lowerCAmelCase = AutoTokenizer.from_pretrained("distilgpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = torch.ones((1, 5) , device=_lowerCAmelCase ).long() * model.config.bos_token_id with CaptureStdout() as cs: _lowerCAmelCase = TextStreamer(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase ) model.generate(_lowerCAmelCase , max_new_tokens=1 , do_sample=_lowerCAmelCase , streamer=_lowerCAmelCase ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _lowerCAmelCase = cs.out[:-1] # Remove the final "\n" _lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="pt" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def _snake_case ( self ) -> Union[str, Any]: _lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" ) _lowerCAmelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(_lowerCAmelCase ) _lowerCAmelCase = -1 _lowerCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowerCAmelCase ) _lowerCAmelCase = TextIteratorStreamer(_lowerCAmelCase , timeout=0.001 ) _lowerCAmelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer} _lowerCAmelCase = Thread(target=model.generate , kwargs=_lowerCAmelCase ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_lowerCAmelCase ): _lowerCAmelCase = "" for new_text in streamer: streamer_text += new_text
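Outside the test harness, the iterator streamer is used exactly as the second test shows: generation runs in a background thread while the main thread consumes decoded text as it arrives. A minimal usage sketch, reusing the tiny test model named above:

from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer)

# generate() blocks, so it runs in a worker thread while we iterate here
thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer})
thread.start()
for new_text in streamer:
    print(new_text, end="")
thread.join()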
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for d in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
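The only subtle part of the forward pass is the bookkeeping that turns a (batch*frames, C, H, W) tensor into one sequence of length num_frames per spatial position, so that self-attention runs over time. The round trip can be checked in isolation; this sketch uses arbitrary small sizes and collapses the two permutes of the model into one:

import torch

batch_size, num_frames, channel, height, width = 2, 4, 8, 3, 3
x = torch.randn(batch_size * num_frames, channel, height, width)

# fold frames out of the batch, then build (pixel, frame, channel) sequences
seq = (
    x.reshape(batch_size, num_frames, channel, height, width)
    .permute(0, 3, 4, 1, 2)
    .reshape(batch_size * height * width, num_frames, channel)
)

# invert the transform
back = (
    seq.reshape(batch_size, height, width, num_frames, channel)
    .permute(0, 3, 4, 1, 2)
    .reshape(batch_size * num_frames, channel, height, width)
)
assert torch.equal(x, back)  # attention over dim 1 of `seq` is attention over time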
"""simple docstring""" def lowerCamelCase__ ( __snake_case, __snake_case ) -> int: """simple docstring""" return int(input_a == input_a == 0 ) def lowerCamelCase__ ( ) -> None: """simple docstring""" print('''Truth Table of NOR Gate:''' ) print('''| Input 1 | Input 2 | Output |''' ) print(F'''| 0 | 0 | {nor_gate(0, 0 )} |''' ) print(F'''| 0 | 1 | {nor_gate(0, 1 )} |''' ) print(F'''| 1 | 0 | {nor_gate(1, 0 )} |''' ) print(F'''| 1 | 1 | {nor_gate(1, 1 )} |''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py lowercase_ = """src/diffusers""" lowercase_ = """.""" # This is to make sure the diffusers module imported is the one in the repo. lowercase_ = importlib.util.spec_from_file_location( """diffusers""", os.path.join(DIFFUSERS_PATH, """__init__.py"""), submodule_search_locations=[DIFFUSERS_PATH], ) lowercase_ = spec.loader.load_module() def a__ ( snake_case , snake_case ): """simple docstring""" return line.startswith(snake_case ) or len(snake_case ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , snake_case ) is not None def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = object_name.split('''.''' ) __SCREAMING_SNAKE_CASE : str = 0 # First let's find the module where our object lives. __SCREAMING_SNAKE_CASE : Any = parts[i] while i < len(snake_case ) and not os.path.isfile(os.path.join(snake_case , F'''{module}.py''' ) ): i += 1 if i < len(snake_case ): __SCREAMING_SNAKE_CASE : str = os.path.join(snake_case , parts[i] ) if i >= len(snake_case ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(snake_case , F'''{module}.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : Dict = f.readlines() # Now let's find the class / func in the code! __SCREAMING_SNAKE_CASE : Union[str, Any] = '''''' __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 for name in parts[i + 1 :]: while ( line_index < len(snake_case ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(snake_case ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). __SCREAMING_SNAKE_CASE : List[Any] = line_index while line_index < len(snake_case ) and _should_continue(lines[line_index] , snake_case ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : Dict = lines[start_index:line_index] return "".join(snake_case ) lowercase_ = re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""") lowercase_ = re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""") lowercase_ = re.compile(R"""<FILL\s+[^>]*>""") def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : int = code.split('''\n''' ) __SCREAMING_SNAKE_CASE : Dict = 0 while idx < len(snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(snake_case ): return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0] return "" def a__ ( snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Union[str, Any] = len(get_indent(snake_case ) ) > 0 if has_indent: __SCREAMING_SNAKE_CASE : List[Any] = F'''class Bla:\n{code}''' __SCREAMING_SNAKE_CASE : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=snake_case ) __SCREAMING_SNAKE_CASE : Optional[int] = black.format_str(snake_case , mode=snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = style_docstrings_in_code(snake_case ) return result[len('''class Bla:\n''' ) :] if has_indent else result def a__ ( snake_case , snake_case=False ): """simple docstring""" with open(snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: __SCREAMING_SNAKE_CASE : List[str] = f.readlines() __SCREAMING_SNAKE_CASE : Optional[Any] = [] __SCREAMING_SNAKE_CASE : int = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(snake_case ): __SCREAMING_SNAKE_CASE : Dict = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = search.groups() __SCREAMING_SNAKE_CASE : int = find_code_in_diffusers(snake_case ) __SCREAMING_SNAKE_CASE : str = get_indent(snake_case ) __SCREAMING_SNAKE_CASE : Any = line_index + 1 if indent == theoretical_indent else line_index + 2 __SCREAMING_SNAKE_CASE : Dict = theoretical_indent __SCREAMING_SNAKE_CASE : Optional[int] = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. __SCREAMING_SNAKE_CASE : List[Any] = True while line_index < len(snake_case ) and should_continue: line_index += 1 if line_index >= len(snake_case ): break __SCREAMING_SNAKE_CASE : Any = lines[line_index] __SCREAMING_SNAKE_CASE : Optional[Any] = _should_continue(snake_case , snake_case ) and re.search(F'''^{indent}# End copy''' , snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 __SCREAMING_SNAKE_CASE : List[str] = lines[start_index:line_index] __SCREAMING_SNAKE_CASE : Dict = ''''''.join(snake_case ) # Remove any nested `Copied from` comments to avoid circular copies __SCREAMING_SNAKE_CASE : Tuple = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(snake_case ) is None] __SCREAMING_SNAKE_CASE : Union[str, Any] = '''\n'''.join(snake_case ) # Before comparing, use the `replace_pattern` on the original code. 
if len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = replace_pattern.replace('''with''' , '''''' ).split(''',''' ) __SCREAMING_SNAKE_CASE : Optional[Any] = [_re_replace_pattern.search(snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = pattern.groups() __SCREAMING_SNAKE_CASE : str = re.sub(snake_case , snake_case , snake_case ) if option.strip() == "all-casing": __SCREAMING_SNAKE_CASE : Optional[Any] = re.sub(obja.lower() , obja.lower() , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = re.sub(obja.upper() , obja.upper() , snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line __SCREAMING_SNAKE_CASE : Optional[Any] = blackify(lines[start_index - 1] + theoretical_code ) __SCREAMING_SNAKE_CASE : int = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: __SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:] __SCREAMING_SNAKE_CASE : str = start_index + 1 if overwrite and len(snake_case ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(snake_case , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(snake_case ) return diffs def a__ ( snake_case = False ): """simple docstring""" __SCREAMING_SNAKE_CASE : str = glob.glob(os.path.join(snake_case , '''**/*.py''' ) , recursive=snake_case ) __SCREAMING_SNAKE_CASE : Tuple = [] for filename in all_files: __SCREAMING_SNAKE_CASE : int = is_copy_consistent(snake_case , snake_case ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(snake_case ) > 0: __SCREAMING_SNAKE_CASE : Optional[int] = '''\n'''.join(snake_case ) raise Exception( '''Found the following copy inconsistencies:\n''' + diff + '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""") lowercase_ = parser.parse_args() check_copies(args.fix_and_overwrite)
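The whole mechanism hinges on the `# Copied from` comment regex defined above. A small demonstration of how its three capture groups (indent, target object, optional replacement pattern) come apart; the comment line is made up for illustration:

import re

_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")

line = "    # Copied from diffusers.models.attention.BasicTransformerBlock with Basic->Fancy"
match = _re_copy_warning.search(line)
indent, object_name, replace_pattern = match.groups()

print(repr(indent))     # '    '
print(object_name)      # models.attention.BasicTransformerBlock
print(replace_pattern)  # with Basic->Fancy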