code
stringlengths
82
53.2k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) snake_case_ = (boundary[1] - boundary[0]) / steps snake_case_ = boundary[0] snake_case_ = boundary[1] snake_case_ = make_points(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) snake_case_ = 0.0 y += (h / 2.0) * f(SCREAMING_SNAKE_CASE__ ) for i in x_i: # print(i) y += h * f(SCREAMING_SNAKE_CASE__ ) y += (h / 2.0) * f(SCREAMING_SNAKE_CASE__ ) return y def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): snake_case_ = a + h while x < (b - h): yield x snake_case_ = x + h def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ): # enter your function here snake_case_ = (x - 0) * (x - 0) return y def __SCREAMING_SNAKE_CASE (): snake_case_ = 0.0 # Lower bound of integration snake_case_ = 1.0 # Upper bound of integration snake_case_ = 10.0 # define number of steps or resolution snake_case_ = [a, b] # define boundary of integration snake_case_ = method_a(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) print(F'''y = {y}''' ) if __name__ == "__main__": main()
39
"""simple docstring""" import unittest import torch from torch import nn from diffusers.models.activations import get_activation class A__( unittest.TestCase ): def _a ( self : List[str] ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE = get_activation('''swish''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def _a ( self : List[Any] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = get_activation('''silu''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , nn.SiLU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def _a ( self : List[str] ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE = get_activation('''mish''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , nn.Mish ) self.assertEqual(act(torch.tensor(-2_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def _a ( self : int ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE = get_activation('''gelu''' ) self.assertIsInstance(__SCREAMING_SNAKE_CASE , nn.GELU ) self.assertEqual(act(torch.tensor(-1_00 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) 
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
482
0
'''simple docstring''' import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) def __A ( _A , _A , _A , _A ): """simple docstring""" def constraint_to_multiple_of(_A , _A , _A=0 , _A=None ): __a = round(val / multiple ) * multiple if max_val is not None and x > max_val: __a = math.floor(val / multiple ) * multiple if x < min_val: __a = math.ceil(val / multiple ) * multiple return x __a = (output_size, output_size) if isinstance(_A , _A ) else output_size __a , __a = get_image_size(_A ) __a , __a = output_size # determine new height and width __a = output_height / input_height __a = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width __a = scale_width else: # fit height __a = scale_height __a = constraint_to_multiple_of(scale_height * input_height , multiple=_A ) __a = constraint_to_multiple_of(scale_width * input_width , multiple=_A ) return (new_height, new_width) class A_ ( a_ ): _SCREAMING_SNAKE_CASE = ["""pixel_values"""] def __init__( self : int , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Dict[str, int] = None , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BILINEAR , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , 
__SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 2_55 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , **__SCREAMING_SNAKE_CASE : List[Any] , ): super().__init__(**__SCREAMING_SNAKE_CASE ) __a = size if size is not None else {"height": 3_84, "width": 3_84} __a = get_size_dict(__SCREAMING_SNAKE_CASE ) __a = do_resize __a = size __a = keep_aspect_ratio __a = ensure_multiple_of __a = resample __a = do_rescale __a = rescale_factor __a = do_normalize __a = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __a = image_std if image_std is not None else IMAGENET_STANDARD_STD def _UpperCAmelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, int] , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Optional[int] , ): __a = get_size_dict(__SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) __a = get_resize_output_image_size( __SCREAMING_SNAKE_CASE , output_size=(size["height"], size["width"]) , keep_aspect_ratio=__SCREAMING_SNAKE_CASE , multiple=__SCREAMING_SNAKE_CASE , ) return resize(__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[int, float] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : Any , ): return rescale(__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Union[float, List[float]] , __SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **__SCREAMING_SNAKE_CASE : List[str] , ): return normalize(__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE , data_format=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : ImageInput , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : int = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : int = None , __SCREAMING_SNAKE_CASE : PILImageResampling = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : float = None , __SCREAMING_SNAKE_CASE : bool = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , __SCREAMING_SNAKE_CASE : ChannelDimension = ChannelDimension.FIRST , **__SCREAMING_SNAKE_CASE : Dict , ): __a = do_resize if do_resize is not None else self.do_resize __a = size if 
size is not None else self.size __a = get_size_dict(__SCREAMING_SNAKE_CASE ) __a = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio __a = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of __a = resample if resample is not None else self.resample __a = do_rescale if do_rescale is not None else self.do_rescale __a = rescale_factor if rescale_factor is not None else self.rescale_factor __a = do_normalize if do_normalize is not None else self.do_normalize __a = image_mean if image_mean is not None else self.image_mean __a = image_std if image_std is not None else self.image_std __a = make_list_of_images(__SCREAMING_SNAKE_CASE ) if not valid_images(__SCREAMING_SNAKE_CASE ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. 
__a = [to_numpy_array(__SCREAMING_SNAKE_CASE ) for image in images] if do_resize: __a = [self.resize(image=__SCREAMING_SNAKE_CASE , size=__SCREAMING_SNAKE_CASE , resample=__SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: __a = [self.rescale(image=__SCREAMING_SNAKE_CASE , scale=__SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: __a = [self.normalize(image=__SCREAMING_SNAKE_CASE , mean=__SCREAMING_SNAKE_CASE , std=__SCREAMING_SNAKE_CASE ) for image in images] __a = [to_channel_dimension_format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for image in images] __a = {"pixel_values": images} return BatchFeature(data=__SCREAMING_SNAKE_CASE , tensor_type=__SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Tuple] = None ): __a = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(__SCREAMING_SNAKE_CASE ) != len(__SCREAMING_SNAKE_CASE ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(__SCREAMING_SNAKE_CASE ): __a = target_sizes.numpy() __a = [] for idx in range(len(__SCREAMING_SNAKE_CASE ) ): __a = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=__SCREAMING_SNAKE_CASE ) __a = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(__SCREAMING_SNAKE_CASE ) else: __a = logits.argmax(dim=1 ) __a = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
717
from __future__ import annotations SCREAMING_SNAKE_CASE : Optional[int] = [] def __A ( _A , _A , _A ): """simple docstring""" for i in range(len(_A ) ): if board[row][i] == 1: return False for i in range(len(_A ) ): if board[i][column] == 1: return False for i, j in zip(range(_A , -1 , -1 ) , range(_A , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_A , -1 , -1 ) , range(_A , len(_A ) ) ): if board[i][j] == 1: return False return True def __A ( _A , _A ): """simple docstring""" if row >= len(_A ): solution.append(_A ) printboard(_A ) print() return True for i in range(len(_A ) ): if is_safe(_A , _A , _A ): __a = 1 solve(_A , row + 1 ) __a = 0 return False def __A ( _A ): """simple docstring""" for i in range(len(_A ) ): for j in range(len(_A ) ): if board[i][j] == 1: print("Q" , end=" " ) else: print("." , end=" " ) print() # n=int(input("The no. of queens")) SCREAMING_SNAKE_CASE : List[str] = 8 SCREAMING_SNAKE_CASE : str = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print("""The total no. of solutions are :""", len(solution))
525
0
from collections import defaultdict from math import gcd def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_50_00_00 ) -> int: """simple docstring""" _UpperCAmelCase = defaultdict(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , SCREAMING_SNAKE_CASE_ , 2 ): if gcd(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) > 1: continue _UpperCAmelCase = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(SCREAMING_SNAKE_CASE_ , limit + 1 , SCREAMING_SNAKE_CASE_ ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(f'''{solution() = }''')
32
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", 
"FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", 
"FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), 
("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) UpperCAmelCase_ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) UpperCAmelCase_ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) UpperCAmelCase_ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) UpperCAmelCase_ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, 
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) UpperCAmelCase_ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModel) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_MASKED_LM_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : List[str] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token 
classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Union[str, Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class __UpperCamelCase ( _BaseAutoModelClass ): __A : Any = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Dict = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class __UpperCamelCase ( _BaseAutoModelClass ): __A : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class __UpperCamelCase ( _BaseAutoModelClass ): __A : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING UpperCAmelCase_ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
32
1
import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class a_ ( unittest.TestCase ): def __init__( self :List[Any] , _lowercase :Union[str, Any] , _lowercase :Optional[Any]=7 , _lowercase :Tuple=3 , _lowercase :Union[str, Any]=18 , _lowercase :Union[str, Any]=30 , _lowercase :str=400 , _lowercase :Optional[int]=True , _lowercase :List[Any]=None , _lowercase :List[str]=True , _lowercase :Tuple=False , _lowercase :Optional[int]=True , _lowercase :List[str]=True , _lowercase :Optional[Any]=[0.5, 0.5, 0.5] , _lowercase :int=[0.5, 0.5, 0.5] , ) -> List[Any]: UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = num_channels UpperCAmelCase_ = image_size UpperCAmelCase_ = min_resolution UpperCAmelCase_ = max_resolution UpperCAmelCase_ = do_resize UpperCAmelCase_ = size if size is not None else {'''height''': 18, '''width''': 20} UpperCAmelCase_ = do_thumbnail UpperCAmelCase_ = do_align_axis UpperCAmelCase_ = do_pad UpperCAmelCase_ = do_normalize UpperCAmelCase_ = image_mean UpperCAmelCase_ = image_std def __a ( self :Any) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class a_ ( _snake_case , unittest.TestCase ): UpperCamelCase__ : Union[str, Any] =DonutImageProcessor if is_vision_available() else None def __a ( self :List[str]) -> Union[str, Any]: UpperCAmelCase_ = DonutImageProcessingTester(self) @property def __a ( self :Tuple) -> int: return 
self.image_processor_tester.prepare_image_processor_dict() def __a ( self :List[str]) -> Optional[int]: UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(_lowercase , '''do_resize''')) self.assertTrue(hasattr(_lowercase , '''size''')) self.assertTrue(hasattr(_lowercase , '''do_thumbnail''')) self.assertTrue(hasattr(_lowercase , '''do_align_long_axis''')) self.assertTrue(hasattr(_lowercase , '''do_pad''')) self.assertTrue(hasattr(_lowercase , '''do_normalize''')) self.assertTrue(hasattr(_lowercase , '''image_mean''')) self.assertTrue(hasattr(_lowercase , '''image_std''')) def __a ( self :Tuple) -> int: UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20}) UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42}) # Previous config had dimensions in (width, height) order UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84)) self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42}) def __a ( self :int) -> str: pass @is_flaky() def __a ( self :Union[str, Any]) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase) for image in image_inputs: self.assertIsInstance(_lowercase , Image.Image) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCAmelCase_ = image_processing(_lowercase , 
return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def __a ( self :Any) -> List[Any]: # Initialize image_processing UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase) for image in image_inputs: self.assertIsInstance(_lowercase , np.ndarray) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCAmelCase_ = image_processing(_lowercase , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def __a ( self :Optional[int]) -> Dict: # Initialize image_processing UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase) for image in image_inputs: self.assertIsInstance(_lowercase , torch.Tensor) # Test not batched input UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched UpperCAmelCase_ = 
image_processing(_lowercase , return_tensors='''pt''').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
561
import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( "The `image_to_image.py` script is outdated. Please use directly `from diffusers import" " StableDiffusionImg2ImgPipeline` instead." )
561
1
"""simple docstring""" class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : List[Any] = name __UpperCAmelCase : Optional[int] = value __UpperCAmelCase : Any = weight def __repr__( self : List[Any] ): '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})''' def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self.value def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self.name def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return self.weight def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return self.value / self.weight def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = [] for i in range(len(_lowerCAmelCase ) ): menu.append(Things(name[i] , value[i] , weight[i] ) ) return menu def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = sorted(_lowerCAmelCase , key=_lowerCAmelCase , reverse=_lowerCAmelCase ) __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase ,__UpperCAmelCase : List[str] = 0.0, 0.0 for i in range(len(_lowerCAmelCase ) ): if (total_cost + items_copy[i].get_weight()) <= max_cost: result.append(items_copy[i] ) total_cost += items_copy[i].get_weight() total_value += items_copy[i].get_value() return (result, total_value) def lowerCamelCase ( ) -> List[str]: '''simple docstring''' pass if __name__ == "__main__": import doctest doctest.testmod()
139
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) snake_case__ : Dict = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Tuple = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : int = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys snake_case__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
392
0
"""simple docstring""" def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->None: UpperCAmelCase__ = generate_pascal_triangle(_SCREAMING_SNAKE_CASE ) for row_idx in range(_SCREAMING_SNAKE_CASE ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->list[list[int]]: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) UpperCAmelCase__ = [] for current_row_idx in range(_SCREAMING_SNAKE_CASE ): UpperCAmelCase__ = populate_current_row(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) triangle.append(_SCREAMING_SNAKE_CASE ) return triangle def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]: UpperCAmelCase__ = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 UpperCAmelCase__ , UpperCAmelCase__ = 1, 1 for current_col_idx in range(1 , _SCREAMING_SNAKE_CASE ): calculate_current_element( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return current_row def snake_case__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) ->None: UpperCAmelCase__ = triangle[current_row_idx - 1][current_col_idx - 1] UpperCAmelCase__ = triangle[current_row_idx - 1][current_col_idx] UpperCAmelCase__ = above_to_left_elt + above_to_right_elt def snake_case__ ( _SCREAMING_SNAKE_CASE ) ->list[list[int]]: if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif 
num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) UpperCAmelCase__ = [[1]] for row_index in range(1 , _SCREAMING_SNAKE_CASE ): UpperCAmelCase__ = [0] + result[-1] + [0] UpperCAmelCase__ = row_index + 1 # Calculate the number of distinct elements in a row UpperCAmelCase__ = sum(divmod(_SCREAMING_SNAKE_CASE , 2 ) ) UpperCAmelCase__ = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] UpperCAmelCase__ = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() UpperCAmelCase__ = row_first_half + row_second_half result.append(_SCREAMING_SNAKE_CASE ) return result def snake_case__ ( ) ->None: from collections.abc import Callable from timeit import timeit def benchmark_a_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> None: UpperCAmelCase__ = F'''{func.__name__}({value})''' UpperCAmelCase__ = timeit(F'''__main__.{call}''' , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(1_5 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
716
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer @dataclass class _UpperCamelCase ( __UpperCamelCase ): '''simple docstring''' __lowercase : torch.FloatTensor class _UpperCamelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' @register_to_config def __init__( self , __lowercase = 3 , __lowercase = 3 , __lowercase = ("DownEncoderBlock2D",) , __lowercase = ("UpDecoderBlock2D",) , __lowercase = (64,) , __lowercase = 1 , __lowercase = "silu" , __lowercase = 3 , __lowercase = 32 , __lowercase = 256 , __lowercase = 32 , __lowercase = None , __lowercase = 0.18_215 , __lowercase = "group" , ): super().__init__() # pass init params to Encoder UpperCAmelCase__ = Encoder( in_channels=__lowercase , out_channels=__lowercase , down_block_types=__lowercase , block_out_channels=__lowercase , layers_per_block=__lowercase , act_fn=__lowercase , norm_num_groups=__lowercase , double_z=__lowercase , ) UpperCAmelCase__ = vq_embed_dim if vq_embed_dim is not None else latent_channels UpperCAmelCase__ = nn.Convad(__lowercase , __lowercase , 1 ) UpperCAmelCase__ = VectorQuantizer(__lowercase , __lowercase , beta=0.25 , remap=__lowercase , sane_index_shape=__lowercase ) UpperCAmelCase__ = nn.Convad(__lowercase , __lowercase , 1 ) # pass init params to Decoder UpperCAmelCase__ = Decoder( in_channels=__lowercase , out_channels=__lowercase , up_block_types=__lowercase , block_out_channels=__lowercase , layers_per_block=__lowercase , act_fn=__lowercase , norm_num_groups=__lowercase , norm_type=__lowercase , ) @apply_forward_hook def A__ ( self , __lowercase , __lowercase = True ): UpperCAmelCase__ = self.encoder(__lowercase ) UpperCAmelCase__ = self.quant_conv(__lowercase ) 
if not return_dict: return (h,) return VQEncoderOutput(latents=__lowercase ) @apply_forward_hook def A__ ( self , __lowercase , __lowercase = False , __lowercase = True ): # also go through quantization layer if not force_not_quantize: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = self.quantize(__lowercase ) else: UpperCAmelCase__ = h UpperCAmelCase__ = self.post_quant_conv(__lowercase ) UpperCAmelCase__ = self.decoder(__lowercase , quant if self.config.norm_type == """spatial""" else None ) if not return_dict: return (dec,) return DecoderOutput(sample=__lowercase ) def A__ ( self , __lowercase , __lowercase = True ): UpperCAmelCase__ = sample UpperCAmelCase__ = self.encode(__lowercase ).latents UpperCAmelCase__ = self.decode(__lowercase ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__lowercase )
422
0
from ..utils import DummyObject, requires_backends class A ( metaclass=UpperCAmelCase__ ): '''simple docstring''' A__ = ['''onnx'''] def __init__(self : List[Any] , *_UpperCAmelCase : Tuple , **_UpperCAmelCase : Any ) -> List[str]: """simple docstring""" requires_backends(self , ["""onnx"""] ) @classmethod def lowerCamelCase__ (cls : Dict , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : str ) -> Optional[int]: """simple docstring""" requires_backends(cls , ["""onnx"""] ) @classmethod def lowerCamelCase__ (cls : List[Any] , *_UpperCAmelCase : Dict , **_UpperCAmelCase : str ) -> Any: """simple docstring""" requires_backends(cls , ["""onnx"""] )
15
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): '''simple docstring''' UpperCAmelCase__ = '''maskformer-swin''' UpperCAmelCase__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Optional[int] , lowercase__ : List[Any]=224 , lowercase__ : Optional[Any]=4 , lowercase__ : Optional[Any]=3 , lowercase__ : List[str]=96 , lowercase__ : Dict=[2, 2, 6, 2] , lowercase__ : Tuple=[3, 6, 12, 24] , lowercase__ : Optional[Any]=7 , lowercase__ : Any=4.0 , lowercase__ : List[str]=True , lowercase__ : Optional[int]=0.0 , lowercase__ : Dict=0.0 , lowercase__ : Tuple=0.1 , lowercase__ : Any="gelu" , lowercase__ : Union[str, Any]=False , lowercase__ : Optional[int]=0.0_2 , lowercase__ : Tuple=1e-5 , lowercase__ : Dict=None , lowercase__ : List[Any]=None , **lowercase__ : Optional[Any] , ) ->List[str]: '''simple docstring''' super().__init__(**lowercase__ ) _UpperCamelCase : List[Any] = image_size _UpperCamelCase : Any = patch_size _UpperCamelCase : Union[str, Any] = num_channels _UpperCamelCase : Dict = embed_dim _UpperCamelCase : List[Any] = depths _UpperCamelCase : str = len(lowercase__ ) _UpperCamelCase : List[Any] = num_heads _UpperCamelCase : str = window_size _UpperCamelCase : Optional[Any] = mlp_ratio _UpperCamelCase : Optional[Any] = qkv_bias _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : List[Any] = attention_probs_dropout_prob _UpperCamelCase : Union[str, Any] = drop_path_rate _UpperCamelCase : str = hidden_act _UpperCamelCase : Any = use_absolute_embeddings _UpperCamelCase : Tuple = layer_norm_eps _UpperCamelCase : Union[str, Any] = initializer_range # we set the hidden_size attribute in order to make 
Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCamelCase : Dict = int(embed_dim * 2 ** (len(lowercase__ ) - 1) ) _UpperCamelCase : List[Any] = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(lowercase__ ) + 1 )] _UpperCamelCase , _UpperCamelCase : Optional[Any] = get_aligned_output_features_output_indices( out_features=lowercase__ , out_indices=lowercase__ , stage_names=self.stage_names )
435
0
import tempfile import numpy as np import torch from transformers import AutoTokenizer, TaEncoderModel from diffusers import DDPMScheduler, UNetaDConditionModel from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.pipelines.deepfloyd_if import IFWatermarker from diffusers.utils.testing_utils import torch_device from ..test_pipelines_common import to_np class __lowerCAmelCase : """simple docstring""" def snake_case_ ( self : str ): torch.manual_seed(0 ) __lowercase : Optional[int] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __lowercase : Optional[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __lowercase : Tuple = UNetaDConditionModel( sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase : List[str] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_snake_case , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) __lowercase : str = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def 
snake_case_ ( self : Optional[int] ): torch.manual_seed(0 ) __lowercase : Optional[int] = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __lowercase : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) torch.manual_seed(0 ) __lowercase : Dict = UNetaDConditionModel( sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[ '''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D''', ] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=32 , ) unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests torch.manual_seed(0 ) __lowercase : Dict = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_snake_case , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , ) torch.manual_seed(0 ) __lowercase : Optional[int] = DDPMScheduler( num_train_timesteps=1000 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , ) torch.manual_seed(0 ) __lowercase : Any = IFWatermarker() return { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "image_noising_scheduler": image_noising_scheduler, "watermarker": watermarker, "safety_checker": None, "feature_extractor": None, } def snake_case_ ( self : Any ): __lowercase : Tuple = self.get_dummy_components() __lowercase : Any = 
self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) __lowercase : List[str] = self.get_dummy_inputs(_snake_case ) __lowercase : List[Any] = inputs['''prompt'''] __lowercase : int = inputs['''generator'''] __lowercase : Dict = inputs['''num_inference_steps'''] __lowercase : Optional[int] = inputs['''output_type'''] if "image" in inputs: __lowercase : Tuple = inputs['''image'''] else: __lowercase : Dict = None if "mask_image" in inputs: __lowercase : Tuple = inputs['''mask_image'''] else: __lowercase : List[Any] = None if "original_image" in inputs: __lowercase : List[Any] = inputs['''original_image'''] else: __lowercase : Optional[int] = None __lowercase , __lowercase : Optional[int] = pipe.encode_prompt(_snake_case ) # inputs with prompt converted to embeddings __lowercase : int = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: __lowercase : Tuple = image if mask_image is not None: __lowercase : List[str] = mask_image if original_image is not None: __lowercase : List[Any] = original_image # set all optional components to None for optional_component in pipe._optional_components: setattr(_snake_case , _snake_case , _snake_case ) __lowercase : List[str] = pipe(**_snake_case )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_snake_case ) __lowercase : List[Any] = self.pipeline_class.from_pretrained(_snake_case ) pipe_loaded.to(_snake_case ) pipe_loaded.set_progress_bar_config(disable=_snake_case ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests for optional_component in pipe._optional_components: self.assertTrue( getattr(_snake_case , _snake_case ) is None , F'`{optional_component}` did not stay set to None after loading.' 
, ) __lowercase : int = self.get_dummy_inputs(_snake_case ) __lowercase : Union[str, Any] = inputs['''generator'''] __lowercase : Any = inputs['''num_inference_steps'''] __lowercase : Tuple = inputs['''output_type'''] # inputs with prompt converted to embeddings __lowercase : List[str] = { '''prompt_embeds''': prompt_embeds, '''negative_prompt_embeds''': negative_prompt_embeds, '''generator''': generator, '''num_inference_steps''': num_inference_steps, '''output_type''': output_type, } if image is not None: __lowercase : Dict = image if mask_image is not None: __lowercase : Tuple = mask_image if original_image is not None: __lowercase : Any = original_image __lowercase : List[str] = pipe_loaded(**_snake_case )[0] __lowercase : Optional[int] = np.abs(to_np(_snake_case ) - to_np(_snake_case ) ).max() self.assertLess(_snake_case , 1E-4 ) def snake_case_ ( self : Optional[int] ): __lowercase : Union[str, Any] = self.get_dummy_components() __lowercase : Dict = self.pipeline_class(**_snake_case ) pipe.to(_snake_case ) pipe.set_progress_bar_config(disable=_snake_case ) __lowercase : Dict = self.get_dummy_inputs(_snake_case ) __lowercase : Any = pipe(**_snake_case )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(_snake_case ) __lowercase : Dict = self.pipeline_class.from_pretrained(_snake_case ) pipe_loaded.to(_snake_case ) pipe_loaded.set_progress_bar_config(disable=_snake_case ) pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests __lowercase : Optional[Any] = self.get_dummy_inputs(_snake_case ) __lowercase : str = pipe_loaded(**_snake_case )[0] __lowercase : int = np.abs(to_np(_snake_case ) - to_np(_snake_case ) ).max() self.assertLess(_snake_case , 1E-4 )
284
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __lowerCAmelCase : """simple docstring""" def __init__( self : int , _snake_case : Optional[Any] , _snake_case : Union[str, Any]=13 , _snake_case : Optional[Any]=32 , _snake_case : str=2 , _snake_case : Optional[Any]=3 , _snake_case : Tuple=16 , _snake_case : Optional[int]=[1, 2, 1] , _snake_case : Dict=[2, 2, 4] , _snake_case : int=2 , _snake_case : Any=2.0 , _snake_case : Dict=True , _snake_case : Optional[Any]=0.0 , _snake_case : Any=0.0 , _snake_case : str=0.1 , _snake_case : List[Any]="gelu" , _snake_case : str=False , _snake_case : Optional[int]=True , _snake_case : Dict=0.02 , _snake_case : List[Any]=1E-5 , _snake_case : Union[str, Any]=True , _snake_case : int=None , _snake_case : Optional[Any]=True , _snake_case : Optional[Any]=10 , _snake_case : List[Any]=8 , ): __lowercase : str = parent __lowercase : Union[str, Any] = batch_size __lowercase : int = image_size __lowercase : int = patch_size __lowercase : Any = num_channels __lowercase : Optional[int] = embed_dim __lowercase : List[str] = depths __lowercase : List[str] = num_heads __lowercase : Optional[Any] = window_size __lowercase : Union[str, Any] = mlp_ratio __lowercase : int = qkv_bias 
__lowercase : Tuple = hidden_dropout_prob __lowercase : List[str] = attention_probs_dropout_prob __lowercase : Union[str, Any] = drop_path_rate __lowercase : str = hidden_act __lowercase : Optional[Any] = use_absolute_embeddings __lowercase : Union[str, Any] = patch_norm __lowercase : Any = layer_norm_eps __lowercase : int = initializer_range __lowercase : Optional[Any] = is_training __lowercase : str = scope __lowercase : Any = use_labels __lowercase : Union[str, Any] = type_sequence_label_size __lowercase : Union[str, Any] = encoder_stride def snake_case_ ( self : Union[str, Any] ): __lowercase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowercase : str = None if self.use_labels: __lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : Optional[int] = self.get_config() return config, pixel_values, labels def snake_case_ ( self : Tuple ): return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def snake_case_ ( self : int , _snake_case : str , _snake_case : Union[str, Any] , _snake_case : int ): __lowercase : int = SwinvaModel(config=_snake_case ) model.to(_snake_case ) model.eval() __lowercase : Dict = model(_snake_case ) __lowercase : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) __lowercase : Union[str, Any] = 
int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def snake_case_ ( self : str , _snake_case : List[Any] , _snake_case : str , _snake_case : str ): __lowercase : List[Any] = SwinvaForMaskedImageModeling(config=_snake_case ) model.to(_snake_case ) model.eval() __lowercase : str = model(_snake_case ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images __lowercase : Optional[Any] = 1 __lowercase : int = SwinvaForMaskedImageModeling(_snake_case ) model.to(_snake_case ) model.eval() __lowercase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) __lowercase : Union[str, Any] = model(_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def snake_case_ ( self : Dict , _snake_case : Optional[Any] , _snake_case : str , _snake_case : Optional[Any] ): __lowercase : Any = self.type_sequence_label_size __lowercase : Optional[Any] = SwinvaForImageClassification(_snake_case ) model.to(_snake_case ) model.eval() __lowercase : List[str] = model(_snake_case , labels=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case_ ( self : Optional[int] ): __lowercase : int = self.prepare_config_and_inputs() __lowercase , __lowercase , __lowercase : Optional[int] = config_and_inputs __lowercase : Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" A__ : Optional[Any] = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) A__ : Dict = ( {'''feature-extraction''': SwinvaModel, '''image-classification''': 
SwinvaForImageClassification} if is_torch_available() else {} ) A__ : Any = False A__ : List[str] = False A__ : int = False A__ : Tuple = False def snake_case_ ( self : List[Any] ): __lowercase : Optional[int] = SwinvaModelTester(self ) __lowercase : List[Any] = ConfigTester(self , config_class=_snake_case , embed_dim=37 ) def snake_case_ ( self : Optional[Any] ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def snake_case_ ( self : List[str] ): __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) @unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' ) def snake_case_ ( self : Optional[int] ): pass @unittest.skip(reason='''Swinv2 does not use inputs_embeds''' ) def snake_case_ ( self : Optional[int] ): pass def snake_case_ ( self : Union[str, Any] ): __lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Optional[Any] = model_class(_snake_case ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __lowercase : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) ) def snake_case_ ( self : List[Any] ): __lowercase , __lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowercase : Any = model_class(_snake_case ) __lowercase : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowercase : str = [*signature.parameters.keys()] __lowercase : List[Any] = 
['''pixel_values'''] self.assertListEqual(arg_names[:1] , _snake_case ) def snake_case_ ( self : List[Any] ): __lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : Dict = True for model_class in self.all_model_classes: __lowercase : List[Any] = True __lowercase : Dict = False __lowercase : Any = True __lowercase : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): __lowercase : int = model(**self._prepare_for_class(_snake_case , _snake_case ) ) __lowercase : Any = outputs.attentions __lowercase : List[str] = len(self.model_tester.depths ) self.assertEqual(len(_snake_case ) , _snake_case ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __lowercase : List[str] = True __lowercase : List[Any] = config.window_size**2 __lowercase : int = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): __lowercase : str = model(**self._prepare_for_class(_snake_case , _snake_case ) ) __lowercase : Any = outputs.attentions self.assertEqual(len(_snake_case ) , _snake_case ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) __lowercase : List[Any] = len(_snake_case ) # Check attention is always last and order is fine __lowercase : Dict = True __lowercase : Dict = True __lowercase : Optional[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): __lowercase : Dict = model(**self._prepare_for_class(_snake_case , _snake_case ) ) if hasattr(self.model_tester , '''num_hidden_states_types''' ): __lowercase : int = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states __lowercase : Optional[Any] = 2 self.assertEqual(out_len + added_hidden_states , len(_snake_case ) ) __lowercase : Any = outputs.attentions self.assertEqual(len(_snake_case ) , _snake_case ) 
self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def snake_case_ ( self : str , _snake_case : List[str] , _snake_case : Optional[Any] , _snake_case : int , _snake_case : str ): __lowercase : List[Any] = model_class(_snake_case ) model.to(_snake_case ) model.eval() with torch.no_grad(): __lowercase : Optional[int] = model(**self._prepare_for_class(_snake_case , _snake_case ) ) __lowercase : Union[str, Any] = outputs.hidden_states __lowercase : Any = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_snake_case ) , _snake_case ) # Swinv2 has a different seq_length __lowercase : Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowercase : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) __lowercase : Any = outputs.reshaped_hidden_states self.assertEqual(len(_snake_case ) , _snake_case ) __lowercase , __lowercase , __lowercase , __lowercase : str = reshaped_hidden_states[0].shape __lowercase : str = ( reshaped_hidden_states[0].view(_snake_case , _snake_case , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def snake_case_ ( self : int ): __lowercase , __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: __lowercase : List[str] = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case ) # check 
that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase : str = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , _snake_case ) def snake_case_ ( self : List[Any] ): __lowercase , __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : Any = 3 __lowercase : Optional[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) __lowercase : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) __lowercase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) __lowercase : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: __lowercase : List[Any] = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowercase : Union[str, Any] = True self.check_hidden_states_output(_snake_case , _snake_case , _snake_case , (padded_height, padded_width) ) def snake_case_ ( self : Optional[int] ): __lowercase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case ) def snake_case_ ( self : Optional[int] ): __lowercase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def snake_case_ ( self : Dict ): for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : Optional[Any] = SwinvaModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def snake_case_ ( self : str ): __lowercase , __lowercase : Optional[int] = 
self.model_tester.prepare_config_and_inputs_for_common() __lowercase : Optional[int] = _config_zero_init(_snake_case ) for model_class in self.all_model_classes: __lowercase : Tuple = model_class(config=_snake_case ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) @require_vision @require_torch class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @cached_property def snake_case_ ( self : str ): return ( AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ) if is_vision_available() else None ) @slow def snake_case_ ( self : Optional[Any] ): __lowercase : Optional[Any] = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to( _snake_case ) __lowercase : int = self.default_image_processor __lowercase : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) __lowercase : Optional[int] = image_processor(images=_snake_case , return_tensors='''pt''' ).to(_snake_case ) # forward pass with torch.no_grad(): __lowercase : Optional[int] = model(**_snake_case ) # verify the logits __lowercase : List[str] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _snake_case ) __lowercase : Optional[Any] = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(_snake_case ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
284
1
"""Utilities for validating and building device maps for model parallelism."""
from math import ceil


def assert_device_map(device_map, num_blocks):
    """Validate that ``device_map`` covers every attention block exactly once.

    Args:
        device_map: dict mapping a device id to the list of block indices it hosts.
        num_blocks: total number of attention blocks the model has.

    Raises:
        ValueError: if any block is duplicated, missing, or out of range.
    """
    # FIX: both functions in this module were named ``A_`` with duplicate
    # ``snake_case`` parameters (a SyntaxError); names restored and an alias
    # kept below for backward compatibility.
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Evenly partition ``n_layers`` layer indices across ``devices``.

    Returns:
        dict mapping each device to a contiguous chunk of layer indices.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))


# Backward-compatible alias: in the previous revision both functions shared the
# name ``A_``, so that name resolved to the map builder (the later definition).
A_ = get_device_map
143
"""Convert an original LUKE research checkpoint to the Transformers format."""
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert a LUKE checkpoint, verify its outputs, and save model + tokenizer.

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` describing the model config.
        entity_vocab_path: path to the TSV entity vocabulary.
        pytorch_dump_folder_path: output directory for the converted artifacts.
        model_size: ``"base"`` or ``"large"`` — selects the expected output values.

    Raises:
        ValueError: if the loaded weights or verification outputs do not match.
    """
    # FIX: both functions here were named ``A_`` with duplicate ``snake_case``
    # parameters (a SyntaxError) while the code below calls
    # ``load_entity_vocab`` / ``convert_luke_checkpoint``; names restored.
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_two = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_one, entity_token_two]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, enta_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism.
    # NOTE(review): the w2e_/e2w_/e2e_ target keys were lost in the previous
    # revision (three assignments to the same local); reconstructed from the
    # upstream LUKE conversion script — confirm against the original repo.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    # NOTE(review): target index reconstructed from the upstream script.
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    # BUG FIX: this check used ``!=``, raising exactly when the shape was
    # correct; it must mirror the word-hidden-state check above.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    """Read a TSV entity vocabulary and map each entity title to its line index."""
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
143
1
"""Interpolation search on an ascending sorted collection (iterative + recursive)."""


def interpolation_search(sorted_collection, item):
    """Return the index of ``item`` in ascending-sorted ``sorted_collection``, or None.

    FIX: all three functions in this module were defined as ``_snake_case``
    with duplicate parameter names (a SyntaxError) while the ``__main__``
    block calls them by their real names; names restored.
    """
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; ``left``/``right`` bound the subrange searched."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    elif sorted_collection[point] > item:
        return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
    else:
        return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Raise ValueError unless ``collection`` is sorted ascending; else return True."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    # FIX: ``collection`` was only defined when ``debug == 1``, so the default
    # path crashed with NameError; define it unconditionally.
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
708
"""Download a user's full tweet timeline and dump it to a CSV file."""
import csv

import tweepy

# Twitter API credentials
# FIX: all four credential globals previously shared one name, so only the
# last assignment survived; they are distinct values.
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    """Fetch every available tweet of ``screen_name`` and write them to
    ``new_<screen_name>_tweets.csv`` (columns: id, created_at, text).

    FIX: the function was defined under an obfuscated name while being called
    as ``get_all_tweets`` below (NameError); name restored.
    """
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)

    # FIX: guard against accounts with no tweets (alltweets[-1] would raise).
    if not alltweets:
        print(f"no tweets found for {screen_name}")
        return

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)

        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    # FIX: csv files must be opened with newline="" (csv module requirement);
    # also pin the encoding so tweet text round-trips on every platform.
    with open(f"new_{screen_name}_tweets.csv", "w", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    get_all_tweets("FirePing32")
344
0
"""DeeBERT: BERT with early-exit "highway" classifiers after every layer.

FIX: in the previous revision every class was named ``_lowerCAmelCase`` and the
entropy helper ``__lowerCAmelCase``, while the code itself references
``DeeBertEncoder``, ``BertHighway``, ``DeeBertModel``, ``HighwayException`` and
``entropy`` — guaranteed NameErrors. The real names are restored from those
call sites.
"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss

from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
    BERT_INPUTS_DOCSTRING,
    BERT_START_DOCSTRING,
    BertEmbeddings,
    BertLayer,
    BertPooler,
    BertPreTrainedModel,
)


def entropy(x):
    """Entropy of a pre-softmax logit tensor, computed per row of ``x``."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A


class DeeBertEncoder(nn.Module):
    """Stack of BertLayers, each followed by a highway (early-exit) classifier."""

    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        # -1 disables early exit (entropy is never below -1)
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        """Set a single threshold for every layer, or one per layer from a sequence."""
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        """Copy the main model's pooler weights into every highway pooler."""
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                # Early exit: abort the forward pass via an exception that the
                # classification head catches (see DeeBertForSequenceClassification).
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits


@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prune heads: ``heads_to_prune`` maps layer index -> heads to prune."""
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions
        # [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, device
        )

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]

        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits


class HighwayException(Exception):
    """Control-flow exception carrying an early-exit output tuple."""

    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """Early-exit head: pools one intermediate layer's output and classifies it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output


@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # The encoder bailed out early; its payload replaces the outputs.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs  # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
178
"""Convert a timm Swin Transformer checkpoint to the Hugging Face format.

FIX: all four functions were named ``__lowerCAmelCase`` (each shadowing the
previous) with duplicate parameter names, while the code calls
``get_swin_config``, ``convert_state_dict`` and ``convert_swin_checkpoint``;
names restored from those call sites.
"""
import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification


def get_swin_config(swin_name):
    """Build a SwinConfig from a timm model name such as ``swin_tiny_patch4_window7_224``."""
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:  # large
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        # FIX: the key must be converted per entry (was ``int(lowercase)``).
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config


def rename_key(name):
    """Map a timm Swin parameter name to its Hugging Face equivalent."""
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name


def convert_state_dict(orig_state_dict, model):
    """Translate a timm Swin state dict into HF naming, splitting fused qkv tensors.

    NOTE(review): the target keys of the split assignments were lost in the
    previous revision; reconstructed from the upstream Swin conversion script.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"

            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    """Load a timm Swin model, convert it, verify logits parity, and save it."""
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swin_name",
        default="swin_tiny_patch4_window7_224",
        type=str,
        help="Name of the Swin timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
178
1
"""Tests for ClapProcessor (tokenizer + feature-extractor composition)."""
import shutil
import tempfile
import unittest

from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio

from .test_feature_extraction_clap import floats_list


@require_torchaudio
@require_sentencepiece
class __magic_name__(unittest.TestCase):
    # FIX: every method of this class was named ``__snake_case``, so each
    # definition shadowed the previous one and unittest never discovered a
    # single test (discovery needs ``test_*``; fixtures need setUp/tearDown).
    # Conventional unittest names are restored.

    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        # from_pretrained yields the fast tokenizer variant.
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
475
"""simple docstring""" from __future__ import annotations from decimal import Decimal from numpy import array def lowerCamelCase (a_ :list[list[float]]) -> list[list[float]]: lowercase :List[str] = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(a_) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2: # Calculate the determinant of the matrix lowercase :List[str] = float( d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])) if determinant == 0: raise ValueError('''This matrix has no inverse.''') # Creates a copy of the matrix with swapped positions of the elements lowercase :Optional[Any] = [[0.0, 0.0], [0.0, 0.0]] lowercase , lowercase :List[Any] = matrix[1][1], matrix[0][0] lowercase , lowercase :Union[str, Any] = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(a_)) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(a_) == 3 and len(matrix[0]) == 3 and len(matrix[1]) == 3 and len(matrix[2]) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowercase :Any = float( ( (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2])) + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0])) + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1])) ) - ( (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0])) + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2])) + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1])) )) if determinant == 0: raise ValueError('''This matrix has no inverse.''') # Creating cofactor matrix lowercase :Optional[Any] = [ [d(0.0), d(0.0), d(0.0)], [d(0.0), d(0.0), d(0.0)], [d(0.0), d(0.0), d(0.0)], ] lowercase :Optional[int] = (d(matrix[1][1]) * d(matrix[2][2])) - ( d(matrix[1][2]) * d(matrix[2][1]) ) lowercase :str = -( (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])) ) lowercase :Union[str, Any] = (d(matrix[1][0]) * d(matrix[2][1])) - ( d(matrix[1][1]) * 
d(matrix[2][0]) ) lowercase :Union[str, Any] = -( (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])) ) lowercase :Union[str, Any] = (d(matrix[0][0]) * d(matrix[2][2])) - ( d(matrix[0][2]) * d(matrix[2][0]) ) lowercase :List[str] = -( (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])) ) lowercase :Union[str, Any] = (d(matrix[0][1]) * d(matrix[1][2])) - ( d(matrix[0][2]) * d(matrix[1][1]) ) lowercase :Union[str, Any] = -( (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])) ) lowercase :Any = (d(matrix[0][0]) * d(matrix[1][1])) - ( d(matrix[0][1]) * d(matrix[1][0]) ) # Transpose the cofactor matrix (Adjoint matrix) lowercase :Union[str, Any] = array(a_) for i in range(3): for j in range(3): lowercase :Any = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowercase :str = array(a_) for i in range(3): for j in range(3): inverse_matrix[i][j] /= d(a_) # Calculate the inverse of the matrix return [[float(d(a_)) or 0.0 for n in row] for row in inverse_matrix] raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''')
475
1
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowercase(a_ ): def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" with open(lowerCAmelCase_ , encoding='utf-8' ) as input_file: a__ = re.compile(R'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' ) a__ = input_file.read() a__ = regexp.search(lowerCAmelCase_ ) return match def lowercase__ ( self , __SCREAMING_SNAKE_CASE ) -> Any: """simple docstring""" with open(lowerCAmelCase_ , encoding='utf-8' ) as input_file: a__ = re.compile(R'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL ) a__ = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` a__ = regexp.finditer(lowerCAmelCase_ ) a__ = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def lowercase__ ( self ) -> Optional[int]: """simple docstring""" a__ = Path('./datasets' ) a__ = list(dataset_paths.absolute().glob('**/*.py' ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(lowerCAmelCase_ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def lowercase__ ( self ) -> Optional[int]: """simple docstring""" a__ = Path('./datasets' ) a__ = list(dataset_paths.absolute().glob('**/*.py' ) ) for dataset in dataset_files: if self._no_print_statements(str(lowerCAmelCase_ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
273
'''Lazy-import __init__ for the TrOCR model package.

NOTE(review): this chunk looks machine-mangled — the import table and the
modeling-name list are both assigned to ``UpperCAmelCase__`` (the second
assignment clobbers the first), and the final ``_LazyModule`` call reads an
``_import_structure`` name that is never defined here. Code left
byte-identical.
'''

from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)

# Submodule -> public names that are always importable.
UpperCAmelCase__ = {
    '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
    '''processing_trocr''': ['''TrOCRProcessor'''],
}

# Modeling names are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCAmelCase__ = [
        '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TrOCRForCausalLM''',
        '''TrOCRPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; mirrors the table above.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    UpperCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
186
0
'''simple docstring''' from __future__ import annotations def _A ( A__ , A__ , A__ , ): """simple docstring""" if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative in a semiconductor''' ) elif hole_conc < 0: raise ValueError('''Hole concentration cannot be negative in a semiconductor''' ) elif intrinsic_conc < 0: raise ValueError( '''Intrinsic concentration cannot be negative in a semiconductor''' ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
624
'''simple docstring''' import re def _A ( A__ ): """simple docstring""" __lowercase = re.compile( R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' ) return bool(re.search(A__ , A__ ) ) if __name__ == "__main__": lowerCAmelCase__ = '''0094702343221''' print(is_sri_lankan_phone_number(phone))
624
1
from __future__ import annotations


def rec_insertion_sort(collection: list, n: int) -> None:
    """Sort the first *n* elements of *collection* in place, recursively.

    Fix: the mangled original defined both functions under the same name
    (``_UpperCAmelCase``) with duplicated parameter names (a SyntaxError),
    while the bodies and the __main__ block referenced
    ``rec_insertion_sort``/``insert_next``; the names the call sites expect
    are restored here.
    """
    if len(collection) <= 1 or n <= 1:
        # Zero or one element is already sorted.
        return
    # Settle the element at position n-1, then sort the remaining prefix.
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    """Bubble the out-of-order element at ``index - 1`` rightward until the
    adjacent pair ``collection[index - 1] <= collection[index]`` holds."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order.
    # Fix: the original assigned the swapped pair to throwaway names, so no
    # swap ever happened.
    collection[index], collection[index - 1] = (
        collection[index - 1],
        collection[index],
    )
    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
519
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _lowercase : Any = { 'configuration_poolformer': [ 'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PoolFormerConfig', 'PoolFormerOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[Any] = ['PoolFormerFeatureExtractor'] _lowercase : Any = ['PoolFormerImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Optional[Any] = [ 'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PoolFormerForImageClassification', 'PoolFormerModel', 'PoolFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
49
0
# Converter: TensorFlow "token dropping" BERT checkpoint -> PyTorch BertForMaskedLM.
# Loads per-layer attention/intermediate/output weights, embeddings, LM head and
# pooler from a TF2 checkpoint via nested getter closures, then saves and reloads
# the PyTorch model as a smoke test.
# NOTE(review): machine-mangled — every assignment target is `__snake_case`
# (later stores clobber earlier ones instead of populating the model tensors),
# and the CLI at the bottom calls `convert_checkpoint_to_pytorch`, which is not
# the name the function is defined under (`A`). Code left byte-identical;
# restore the original variable/function names from the upstream conversion
# script before running.
import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def A ( snake_case__ : str , snake_case__ : str , snake_case__ : str ) -> Union[str, Any]: '''simple docstring''' def get_masked_lm_array(snake_case__ : str ): __snake_case = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE" __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case__ ) def get_encoder_array(snake_case__ : str ): __snake_case = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE" __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case__ ) def get_encoder_layer_array(snake_case__ : int , snake_case__ : str ): __snake_case = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE" __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case__ ) def get_encoder_attention_layer_array(snake_case__ : int , snake_case__ : str , snake_case__ : Optional[Any] ): __snake_case = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE" __snake_case = tf.train.load_variable(snake_case__ , snake_case__ ) __snake_case = array.reshape(snake_case__ ) if "kernel" in name: __snake_case = array.transpose() return torch.from_numpy(snake_case__ ) print(f"Loading model based on config from {config_path}..."
) __snake_case = BertConfig.from_json_file(snake_case__ ) __snake_case = BertForMaskedLM(snake_case__ ) # Layers for layer_index in range(0 , config.num_hidden_layers ): __snake_case = model.bert.encoder.layer[layer_index] # Self-attention __snake_case = layer.attention.self __snake_case = get_encoder_attention_layer_array( snake_case__ , '_query_dense/kernel' , self_attn.query.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_query_dense/bias' , self_attn.query.bias.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_key_dense/kernel' , self_attn.key.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_key_dense/bias' , self_attn.key.bias.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_value_dense/kernel' , self_attn.value.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output __snake_case = layer.attention.output __snake_case = get_encoder_attention_layer_array( snake_case__ , '_output_dense/kernel' , self_output.dense.weight.data.shape ) __snake_case = get_encoder_attention_layer_array( snake_case__ , '_output_dense/bias' , self_output.dense.bias.data.shape ) __snake_case = get_encoder_layer_array(snake_case__ , '_attention_layer_norm/gamma' ) __snake_case = get_encoder_layer_array(snake_case__ , '_attention_layer_norm/beta' ) # Intermediate __snake_case = layer.intermediate __snake_case = get_encoder_layer_array(snake_case__ , '_intermediate_dense/kernel' ) __snake_case = get_encoder_layer_array(snake_case__ , '_intermediate_dense/bias' ) # Output __snake_case = layer.output __snake_case = get_encoder_layer_array(snake_case__ , '_output_dense/kernel' ) __snake_case = get_encoder_layer_array(snake_case__ , '_output_dense/bias' ) __snake_case = get_encoder_layer_array(snake_case__ , '_output_layer_norm/gamma' ) __snake_case =
get_encoder_layer_array(snake_case__ , '_output_layer_norm/beta' ) # Embeddings __snake_case = get_encoder_array('_position_embedding_layer/embeddings' ) __snake_case = get_encoder_array('_type_embedding_layer/embeddings' ) __snake_case = get_encoder_array('_embedding_norm_layer/gamma' ) __snake_case = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head __snake_case = model.cls.predictions.transform __snake_case = get_masked_lm_array('dense/kernel' ) __snake_case = get_masked_lm_array('dense/bias' ) __snake_case = get_masked_lm_array('layer_norm/gamma' ) __snake_case = get_masked_lm_array('layer_norm/beta' ) __snake_case = get_masked_lm_array('embedding_table' ) # Pooling __snake_case = BertPooler(config=snake_case__ ) __snake_case = get_encoder_array('_pooler_layer/kernel' ) __snake_case = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(snake_case__ ) # Integration test - should load without any errors ;) __snake_case = BertForMaskedLM.from_pretrained(snake_case__ ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": UpperCAmelCase__ : List[str] = argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) UpperCAmelCase__ : Optional[int] = parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
676
def A(snake_case__: int) -> bool:
    """Lucas–Lehmer primality test: return True iff the Mersenne number
    ``2**snake_case__ - 1`` is prime.

    Raises:
        ValueError: if ``snake_case__`` < 2.
    """
    # Fix: the mangled original assigned every value to ``__snake_case`` while
    # the expressions read undefined names ``p``/``s``/``m``.
    if snake_case__ < 2:
        raise ValueError('p should not be less than 2!')
    elif snake_case__ == 2:
        # 2**2 - 1 == 3 is prime; the recurrence below needs p > 2.
        return True
    # s_0 = 4; s_{k+1} = s_k**2 - 2 (mod 2**p - 1).
    # 2**p - 1 is prime iff s_{p-2} == 0.
    s = 4
    m = (1 << snake_case__) - 1
    for _ in range(snake_case__ - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    # Fix: the original printed via an undefined name ``lucas_lehmer_test``.
    print(A(7))
    print(A(11))
676
1
# Legacy read-only fsspec filesystem over a Hub dataset repository ("hf-legacy"):
# lists repo siblings as files, opens them by resolving hf_hub_url with auth headers.
# NOTE(review): machine-mangled — `__init__` (and other methods) declare several
# parameters under the same name `SCREAMING_SNAKE_CASE_` (a SyntaxError), every
# method is named `snake_case_` (later defs shadow earlier ones), and locals are
# stored to `snake_case` while later code reads `self.repo_info`/`self.token`/
# `self.dir_cache`, which are never set. Code left byte-identical; restore the
# original names from the upstream module before running.
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class _A ( snake_case ): '''simple docstring''' __lowerCamelCase : Dict = '''''' __lowerCamelCase : Union[str, Any] = '''hf-legacy''' # "hf://"" is reserved for hffs def __init__( self ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_ = None ,**SCREAMING_SNAKE_CASE_ ,): '''simple docstring''' super().__init__(self ,**SCREAMING_SNAKE_CASE_ ) snake_case : List[Any] = repo_info snake_case : Dict = token snake_case : Any = None def snake_case_ ( self ): '''simple docstring''' if self.dir_cache is None: snake_case : str = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes snake_case : Union[str, Any] = { """name""": hf_file.rfilename, """size""": None, """type""": """file""", } self.dir_cache.update( { str(SCREAMING_SNAKE_CASE_ ): {"""name""": str(SCREAMING_SNAKE_CASE_ ), """size""": None, """type""": """directory"""} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "rb" ,**SCREAMING_SNAKE_CASE_ ,): '''simple docstring''' if not isinstance(self.repo_info ,SCREAMING_SNAKE_CASE_ ): raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" ) snake_case : Tuple = hf_hub_url(self.repo_info.id ,SCREAMING_SNAKE_CASE_ ,revision=self.repo_info.sha ) return fsspec.open( SCREAMING_SNAKE_CASE_ ,mode=SCREAMING_SNAKE_CASE_ ,headers=get_authentication_headers_for_url(SCREAMING_SNAKE_CASE_ ,use_auth_token=self.token ) ,client_kwargs={"""trust_env""": True} ,).open() def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ): '''simple docstring''' self._get_dirs() snake_case : List[Any] = self._strip_protocol(SCREAMING_SNAKE_CASE_ ) if path in self.dir_cache:
return self.dir_cache[path] else: raise FileNotFoundError(SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=False ,**SCREAMING_SNAKE_CASE_ ): '''simple docstring''' self._get_dirs() snake_case : List[str] = PurePosixPath(path.strip("""/""" ) ) snake_case : Optional[int] = {} for p, f in self.dir_cache.items(): snake_case : List[str] = PurePosixPath(p.strip("""/""" ) ) snake_case : int = p.parent if root == path: snake_case : Any = f snake_case : Optional[Any] = list(paths.values() ) if detail: return out else: return sorted(f["""name"""] for f in out )
36
import numpy as np


def lowercase(__A: np.ndarray) -> np.ndarray:
    """Tangent hyperbolic (tanh) activation applied element-wise:
    ``(2 / (1 + e**(-2x))) - 1``.
    """
    # Fix: the original body read an undefined name ``vector``; use the
    # parameter instead.
    return (2 / (1 + np.exp(-2 * __A))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
36
1
"""simple docstring""" def _a ( UpperCAmelCase__ ) -> Dict: if not nums: # Makes sure that the list is not empty raise ValueError('''List is empty''' ) __SCREAMING_SNAKE_CASE = sum(lowerCamelCase__ ) / len(lowerCamelCase__ ) # Calculate the average return sum(abs(x - average ) for x in nums ) / len(lowerCamelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
715
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ ={ "configuration_altclip": [ "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "AltCLIPConfig", "AltCLIPTextConfig", "AltCLIPVisionConfig", ], "processing_altclip": ["AltCLIPProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ =[ "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "AltCLIPPreTrainedModel", "AltCLIPModel", "AltCLIPTextModel", "AltCLIPVisionModel", ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys lowerCAmelCase__ =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
0
# Converter: TensorFlow XLNet checkpoint -> PyTorch, selecting the head from the
# fine-tuning task (GLUE classification, SQuAD QA, or plain LM head), then saving
# weights and config to the dump folder.
# NOTE(review): machine-mangled — the converter function declares four parameters
# all named `__magic_name__` (a SyntaxError), every store goes to the clobbered
# local `lowercase`, and the CLI calls `convert_xlnet_checkpoint_to_pytorch`,
# which is not the name the function is defined under (`__snake_case`). Code
# left byte-identical; restore names from the upstream conversion script before
# running.
import argparse import os import torch from transformers import ( XLNetConfig, XLNetForQuestionAnswering, XLNetForSequenceClassification, XLNetLMHeadModel, load_tf_weights_in_xlnet, ) from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging _snake_case : Dict = { "cola": 2, "mnli": 3, "mrpc": 2, "sst-2": 2, "sts-b": 1, "qqp": 2, "qnli": 2, "rte": 2, "wnli": 2, } logging.set_verbosity_info() def __snake_case ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None ): '''simple docstring''' lowercase = XLNetConfig.from_json_file(__magic_name__ ) lowercase = finetuning_task.lower() if finetuning_task is not None else "" if finetuning_task in GLUE_TASKS_NUM_LABELS: print(F'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' ) lowercase = finetuning_task lowercase = GLUE_TASKS_NUM_LABELS[finetuning_task] lowercase = XLNetForSequenceClassification(__magic_name__ ) elif "squad" in finetuning_task: lowercase = finetuning_task lowercase = XLNetForQuestionAnswering(__magic_name__ ) else: lowercase = XLNetLMHeadModel(__magic_name__ ) # Load weights from tf checkpoint load_tf_weights_in_xlnet(__magic_name__ , __magic_name__ , __magic_name__ ) # Save pytorch-model lowercase = os.path.join(__magic_name__ , __magic_name__ ) lowercase = os.path.join(__magic_name__ , __magic_name__ ) print(F'''Save PyTorch model to {os.path.abspath(__magic_name__ )}''' ) torch.save(model.state_dict() , __magic_name__ ) print(F'''Save configuration file to {os.path.abspath(__magic_name__ )}''' ) with open(__magic_name__ , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _snake_case : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
) parser.add_argument( "--xlnet_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained XLNet model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--finetuning_task", default=None, type=str, help="Name of a task on which the XLNet TensorFlow model was fine-tuned", ) _snake_case : Optional[int] = parser.parse_args() print(args) convert_xlnet_checkpoint_to_pytorch( args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task )
441
# Tests for transformers' optimizers (AdamW, Adafactor) and LR schedulers:
# unwrap_schedule helpers record per-step LRs (optionally through a state_dict
# save/reload round-trip); the TestCase classes check convergence on a tiny
# MSE problem and compare scheduler LR trajectories against expected values.
# NOTE(review): machine-mangled — the two helper functions both declare
# duplicated `__magic_name__` parameters (a SyntaxError), stores go to the
# clobbered local `lowercase`, and the final classmethod body ends abruptly.
# Code left byte-identical; restore names from the upstream test module before
# running.
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def __snake_case ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' lowercase = [] for _ in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def __snake_case ( __magic_name__ , __magic_name__=10 ): '''simple docstring''' lowercase = [] for step in range(__magic_name__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: lowercase = os.path.join(__magic_name__ , "schedule.bin" ) torch.save(scheduler.state_dict() , __magic_name__ ) lowercase = torch.load(__magic_name__ ) scheduler.load_state_dict(__magic_name__ ) return lrs @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def SCREAMING_SNAKE_CASE( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict ) ->Tuple: self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE( self :Optional[int] ) ->Dict: lowercase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__ ) lowercase = torch.tensor([0.4, 0.2, -0.5] ) lowercase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowercase = AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): lowercase = criterion(lowerCAmelCase__ , lowerCAmelCase__ ) loss.backward()
optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def SCREAMING_SNAKE_CASE( self :int ) ->List[Any]: lowercase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase__ ) lowercase = torch.tensor([0.4, 0.2, -0.5] ) lowercase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping lowercase = Adafactor( params=[w] , lr=1E-2 , eps=(1E-3_0, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase__ , weight_decay=0.0 , relative_step=lowerCAmelCase__ , scale_parameter=lowerCAmelCase__ , warmup_init=lowerCAmelCase__ , ) for _ in range(1000 ): lowercase = criterion(lowerCAmelCase__ , lowerCAmelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCamelCase : Any = nn.Linear(50 , 50 ) if is_torch_available() else None UpperCamelCase : Union[str, Any] = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None UpperCamelCase : int = 10 def SCREAMING_SNAKE_CASE( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Any=None ) ->Dict: self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertAlmostEqual(lowerCAmelCase__ , lowerCAmelCase__ , delta=lowerCAmelCase__ , msg=lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE( self :Any ) ->str: lowercase = {"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) lowercase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0,
10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14], ), } for scheduler_func, data in scheds.items(): lowercase , lowercase = data lowercase = scheduler_func(self.optimizer , **lowerCAmelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) lowercase = unwrap_schedule(lowerCAmelCase__ , self.num_steps ) self.assertListAlmostEqual( lowerCAmelCase__ , lowerCAmelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) lowercase = scheduler_func(self.optimizer , **lowerCAmelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(lowerCAmelCase__ ) # wrap to test picklability of the schedule lowercase = unwrap_and_save_reload_schedule(lowerCAmelCase__ , self.num_steps ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' ) class UpperCamelCase_ : '''simple docstring''' def __init__( self :List[str] , lowerCAmelCase__ :int ) ->Optional[int]: lowercase = fn def __call__( self :Optional[Any] , *lowerCAmelCase__ :int , **lowerCAmelCase__ :List[str] ) ->List[str]: return self.fn(*lowerCAmelCase__ , **lowerCAmelCase__ ) @classmethod def SCREAMING_SNAKE_CASE( self :List[Any] , lowerCAmelCase__ :Optional[Any] ) ->str: lowercase = list(map(self , scheduler.lr_lambdas ) )
441
1
"""Download the og:image of a web page to a timestamped .jpg file."""

from datetime import datetime

import requests
# Fix: BeautifulSoup lives in the ``bs4`` package; the original imported it
# from a nonexistent module ``bsa``.
from bs4 import BeautifulSoup

if __name__ == "__main__":
    # Fix: the mangled original assigned every value to ``A`` while the
    # following lines read ``url``/``soup``/``image_url``/``image_data``/
    # ``file_name``; coherent names are restored.
    url = input('Enter image url: ').strip()
    print(f"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(f"""Done. Image saved to disk as {file_name}.""")
97
from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class __a ( __A ): '''simple docstring''' def __snake_case ( self ): SCREAMING_SNAKE_CASE_ : Any = SMALL_MODEL_IDENTIFIER SCREAMING_SNAKE_CASE_ : List[Any] = 'pt' SCREAMING_SNAKE_CASE_ : Tuple = 'tf' def __snake_case ( self , UpperCamelCase__ ): SCREAMING_SNAKE_CASE_ : Dict = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(UpperCamelCase__ ) def __snake_case ( self , UpperCamelCase__ ): SCREAMING_SNAKE_CASE_ : int = TFAutoModel.from_pretrained(self.test_model , from_pt=UpperCamelCase__ ) model_tf.save_pretrained(UpperCamelCase__ ) def __snake_case ( self ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'mock_framework' # Framework provided - return whatever the user provides SCREAMING_SNAKE_CASE_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(UpperCamelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(UpperCamelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = FeaturesManager.determine_framework(UpperCamelCase__ , UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def __snake_case ( self ): # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(UpperCamelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = 
FeaturesManager.determine_framework(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(UpperCamelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = FeaturesManager.determine_framework(UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(UpperCamelCase__ ): SCREAMING_SNAKE_CASE_ : Any = FeaturesManager.determine_framework(UpperCamelCase__ ) def __snake_case ( self ): SCREAMING_SNAKE_CASE_ : Optional[Any] = MagicMock(return_value=UpperCamelCase__ ) with patch('transformers.onnx.features.is_tf_available' , UpperCamelCase__ ): SCREAMING_SNAKE_CASE_ : str = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase__ , self.framework_pt ) # PyTorch not in environment -> use TensorFlow SCREAMING_SNAKE_CASE_ : List[str] = MagicMock(return_value=UpperCamelCase__ ) with patch('transformers.onnx.features.is_torch_available' , UpperCamelCase__ ): SCREAMING_SNAKE_CASE_ : Optional[Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase__ , self.framework_tf ) # Both in environment -> use PyTorch SCREAMING_SNAKE_CASE_ : Union[str, Any] = MagicMock(return_value=UpperCamelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = MagicMock(return_value=UpperCamelCase__ ) with patch('transformers.onnx.features.is_tf_available' , UpperCamelCase__ ), patch( 'transformers.onnx.features.is_torch_available' , UpperCamelCase__ ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(UpperCamelCase__ , self.framework_pt ) # Both not in environment -> raise error SCREAMING_SNAKE_CASE_ : Tuple = MagicMock(return_value=UpperCamelCase__ ) SCREAMING_SNAKE_CASE_ : Any = MagicMock(return_value=UpperCamelCase__ ) with 
patch('transformers.onnx.features.is_tf_available' , UpperCamelCase__ ), patch( 'transformers.onnx.features.is_torch_available' , UpperCamelCase__ ): with self.assertRaises(UpperCamelCase__ ): SCREAMING_SNAKE_CASE_ : Optional[int] = FeaturesManager.determine_framework(self.test_model )
97
1
"""Configuration constants for the documentation-notebook converter.

The obfuscated original assigned three distinct constants to the same name
(`lowerCamelCase`) while referencing the undefined `INSTALL_CONTENT`; the
intended distinct names are restored here.
"""

# Installation cell injected at the top of every generated notebook.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First cells prepended to each converted notebook.
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]

# Placeholder -> concrete-class substitutions applied before running `black`
# on doc snippets, so the placeholders don't break formatting.
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
474
'''simple docstring''' import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def _A ( _lowerCAmelCase=None , _lowerCAmelCase=None ): """simple docstring""" return field(default_factory=lambda: default , metadata=_lowerCAmelCase ) @dataclass class _UpperCamelCase : '''simple docstring''' lowerCAmelCase__ = field( metadata={"""help""": """The csv file to plot."""} , ) lowerCAmelCase__ = field( default=A , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , ) lowerCAmelCase__ = field( default=A , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , ) lowerCAmelCase__ = field( default=A , metadata={"""help""": """Disable logarithmic scale when plotting"""} , ) lowerCAmelCase__ = field( default=A , metadata={ """help""": """Whether the csv file has training results or inference results. Defaults to inference results.""" } , ) lowerCAmelCase__ = field( default=A , metadata={"""help""": """Filename under which the plot will be saved. 
If unused no plot is saved."""} , ) lowerCAmelCase__ = list_field( default=A , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} ) def _A ( _lowerCAmelCase ): """simple docstring""" try: int(_lowerCAmelCase ) return True except ValueError: return False def _A ( _lowerCAmelCase ): """simple docstring""" try: float(_lowerCAmelCase ) return True except ValueError: return False class _UpperCamelCase : '''simple docstring''' def __init__( self : str , _lowerCAmelCase : Tuple): '''simple docstring''' __lowercase =args __lowercase =defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}}) with open(self.args.csv_file , newline='') as csv_file: __lowercase =csv.DictReader(_lowerCAmelCase) for row in reader: __lowercase =row['model'] self.result_dict[model_name]["bsz"].append(int(row['batch_size'])) self.result_dict[model_name]["seq_len"].append(int(row['sequence_length'])) if can_convert_to_int(row['result']): # value is not None __lowercase =int(row['result']) elif can_convert_to_float(row['result']): # value is not None __lowercase =float(row['result']) def __lowerCamelCase ( self : List[str]): '''simple docstring''' __lowercase , __lowercase =plt.subplots() __lowercase ='Time usage' if self.args.is_time else 'Memory usage' __lowercase =title_str + ' for training' if self.args.is_train else title_str + ' for inference' if not self.args.no_log_scale: # set logarithm scales ax.set_xscale('log') ax.set_yscale('log') for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter()) for model_name_idx, model_name in enumerate(self.result_dict.keys()): __lowercase =sorted(set(self.result_dict[model_name]['bsz'])) __lowercase =sorted(set(self.result_dict[model_name]['seq_len'])) __lowercase =self.result_dict[model_name]['result'] ((__lowercase) , (__lowercase)) =( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) __lowercase =( model_name if 
self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: __lowercase =np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_lowerCAmelCase , ) else: __lowercase =np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((__lowercase) , (__lowercase)) =( ('batch_size', 'len') if self.args.plot_along_batch else ('in #tokens', 'bsz') ) __lowercase =np.asarray(_lowerCAmelCase , _lowerCAmelCase)[: len(_lowerCAmelCase)] plt.scatter( _lowerCAmelCase , _lowerCAmelCase , label=f"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""") plt.plot(_lowerCAmelCase , _lowerCAmelCase , '--') title_str += f""" {label_model_name} vs.""" __lowercase =title_str[:-4] __lowercase ='Time in s' if self.args.is_time else 'Memory in MB' # plot plt.title(_lowerCAmelCase) plt.xlabel(_lowerCAmelCase) plt.ylabel(_lowerCAmelCase) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file) else: plt.show() def _A ( ): """simple docstring""" __lowercase =HfArgumentParser(_lowerCAmelCase ) __lowercase =parser.parse_args_into_dataclasses()[0] __lowercase =Plot(args=_lowerCAmelCase ) plot.plot() if __name__ == "__main__": main()
474
1
"""simple docstring""" from __future__ import annotations __A = [] def UpperCamelCase ( _lowerCAmelCase : list[list[int]] , _lowerCAmelCase : int , _lowerCAmelCase : int ): for i in range(len(_lowerCAmelCase ) ): if board[row][i] == 1: return False for i in range(len(_lowerCAmelCase ) ): if board[i][column] == 1: return False for i, j in zip(range(_lowerCAmelCase , -1 , -1 ) , range(_lowerCAmelCase , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(_lowerCAmelCase , -1 , -1 ) , range(_lowerCAmelCase , len(_lowerCAmelCase ) ) ): if board[i][j] == 1: return False return True def UpperCamelCase ( _lowerCAmelCase : list[list[int]] , _lowerCAmelCase : int ): if row >= len(_lowerCAmelCase ): solution.append(_lowerCAmelCase ) printboard(_lowerCAmelCase ) print() return True for i in range(len(_lowerCAmelCase ) ): if is_safe(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): __a = 1 solve(_lowerCAmelCase , row + 1 ) __a = 0 return False def UpperCamelCase ( _lowerCAmelCase : list[list[int]] ): for i in range(len(_lowerCAmelCase ) ): for j in range(len(_lowerCAmelCase ) ): if board[i][j] == 1: print("""Q""" , end=""" """ ) else: print(""".""" , end=""" """ ) print() # n=int(input("The no. of queens")) __A = 8 __A = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print("""The total no. of solutions are :""", len(solution))
173
"""simple docstring""" __A = 6_55_21 def UpperCamelCase ( _lowerCAmelCase : str ): __a = 1 __a = 0 for plain_chr in plain_text: __a = (a + ord(_lowerCAmelCase )) % MOD_ADLER __a = (b + a) % MOD_ADLER return (b << 16) | a
173
1
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _a : Tuple = logging.get_logger(__name__) _a : Union[str, Any] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } _a : List[str] = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } _a : List[str] = {'facebook/blenderbot-3B': 1_28} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def UpperCamelCase__ ( ): '''simple docstring''' __lowerCamelCase = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) __lowerCamelCase = bs[:] __lowerCamelCase = 0 for b in range(2**8 ): if b not in bs: bs.append(_A ) cs.append(2**8 + n ) n += 1 __lowerCamelCase = [chr(_A ) for n in cs] return dict(zip(_A , _A ) ) def UpperCamelCase__ ( _A: List[Any] ): '''simple docstring''' __lowerCamelCase = set() __lowerCamelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowerCamelCase = char return pairs class UpperCamelCase_ ( __UpperCamelCase ): """simple docstring""" A = VOCAB_FILES_NAMES A = PRETRAINED_VOCAB_FILES_MAP A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A = ['''input_ids''', '''attention_mask'''] def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="replace" , UpperCAmelCase="<s>" , UpperCAmelCase="</s>" , UpperCAmelCase="</s>" , 
UpperCAmelCase="<s>" , UpperCAmelCase="<unk>" , UpperCAmelCase="<pad>" , UpperCAmelCase="<mask>" , UpperCAmelCase=False , **UpperCAmelCase , ): __lowerCamelCase = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token __lowerCamelCase = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token __lowerCamelCase = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token __lowerCamelCase = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token __lowerCamelCase = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token __lowerCamelCase = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it __lowerCamelCase = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token super().__init__( errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , ) with open(UpperCAmelCase , encoding="""utf-8""" ) as vocab_handle: __lowerCamelCase = json.load(UpperCAmelCase ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} __lowerCamelCase = errors # how to handle errors in decoding __lowerCamelCase = bytes_to_unicode() __lowerCamelCase = {v: k for k, v in self.byte_encoder.items()} with open(UpperCAmelCase , encoding="""utf-8""" ) as merges_handle: __lowerCamelCase = merges_handle.read().split("""\n""" )[1:-1] __lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges] __lowerCamelCase = dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) ) __lowerCamelCase = {} __lowerCamelCase = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __lowerCamelCase = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def lowerCamelCase_ ( self ): return len(self.encoder ) def lowerCamelCase_ ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase_ ( self , UpperCAmelCase ): if token in self.cache: return self.cache[token] __lowerCamelCase = tuple(UpperCAmelCase ) __lowerCamelCase = get_pairs(UpperCAmelCase ) if not pairs: return token while True: __lowerCamelCase = min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float("""inf""" ) ) ) if 
bigram not in self.bpe_ranks: break __lowerCamelCase , __lowerCamelCase = bigram __lowerCamelCase = [] __lowerCamelCase = 0 while i < len(UpperCAmelCase ): try: __lowerCamelCase = word.index(UpperCAmelCase , UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowerCamelCase = j if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __lowerCamelCase = tuple(UpperCAmelCase ) __lowerCamelCase = new_word if len(UpperCAmelCase ) == 1: break else: __lowerCamelCase = get_pairs(UpperCAmelCase ) __lowerCamelCase = """ """.join(UpperCAmelCase ) __lowerCamelCase = word return word def lowerCamelCase_ ( self , UpperCAmelCase ): __lowerCamelCase = [] for token in re.findall(self.pat , UpperCAmelCase ): __lowerCamelCase = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(""" """ ) ) return bpe_tokens def lowerCamelCase_ ( self , UpperCAmelCase ): return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) ) def lowerCamelCase_ ( self , UpperCAmelCase ): return self.decoder.get(UpperCAmelCase ) def lowerCamelCase_ ( self , UpperCAmelCase ): __lowerCamelCase = """""".join(UpperCAmelCase ) __lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ): if not os.path.isdir(UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __lowerCamelCase = os.path.join( UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __lowerCamelCase = os.path.join( UpperCAmelCase , (filename_prefix + 
"""-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + """\n""" ) __lowerCamelCase = 0 with open(UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) __lowerCamelCase = token_index writer.write(""" """.join(UpperCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase )) + [1] return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1] def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ): __lowerCamelCase = [self.sep_token_id] __lowerCamelCase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase=False , **UpperCAmelCase ): __lowerCamelCase = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()): __lowerCamelCase = """ """ + text return (text, kwargs) def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = None ): return token_ids_a + [self.eos_token_id] def lowerCamelCase_ ( self , UpperCAmelCase ): 
__lowerCamelCase = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(""" """ + text ) else: # Generated responses should contain them already. inputs.append(UpperCAmelCase ) __lowerCamelCase = """ """.join(UpperCAmelCase ) __lowerCamelCase = self.encode(UpperCAmelCase ) if len(UpperCAmelCase ) > self.model_max_length: __lowerCamelCase = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
479
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int _a : Any = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class UpperCamelCase_ ( datasets.BuilderConfig ): """simple docstring""" A = None def UpperCamelCase__ ( _A: "pyspark.sql.DataFrame" , _A: List[int] , ): '''simple docstring''' import pyspark def generate_fn(): __lowerCamelCase = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) ) for partition_id in partition_order: __lowerCamelCase = df_with_partition_id.select("""*""" ).where(f'''part_id = {partition_id}''' ).drop("""part_id""" ) __lowerCamelCase = partition_df.collect() __lowerCamelCase = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class UpperCamelCase_ ( _BaseExamplesIterable ): """simple docstring""" def __init__( self , UpperCAmelCase , UpperCAmelCase=None , ): __lowerCamelCase = df __lowerCamelCase = partition_order or range(self.df.rdd.getNumPartitions() ) __lowerCamelCase = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ): yield from self.generate_examples_fn() def lowerCamelCase_ ( self , UpperCAmelCase ): __lowerCamelCase = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(UpperCAmelCase ) return SparkExamplesIterable(self.df , partition_order=UpperCAmelCase ) def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase ): __lowerCamelCase = self.split_shard_indices_by_worker(UpperCAmelCase , UpperCAmelCase ) return 
SparkExamplesIterable(self.df , partition_order=UpperCAmelCase ) @property def lowerCamelCase_ ( self ): return len(self.partition_order ) class UpperCamelCase_ ( datasets.DatasetBuilder ): """simple docstring""" A = SparkConfig def __init__( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ): import pyspark __lowerCamelCase = pyspark.sql.SparkSession.builder.getOrCreate() __lowerCamelCase = df __lowerCamelCase = working_dir super().__init__( cache_dir=UpperCAmelCase , config_name=str(self.df.semanticHash() ) , **UpperCAmelCase , ) def lowerCamelCase_ ( self ): # Returns the path of the created file. def create_cache_and_write_probe(UpperCAmelCase ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=UpperCAmelCase ) __lowerCamelCase = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(UpperCAmelCase , """a""" ) return [probe_file] if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: __lowerCamelCase = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(UpperCAmelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( """When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" ) def lowerCamelCase_ ( self ): return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase_ ( self , UpperCAmelCase ): return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCamelCase_ ( self , UpperCAmelCase ): import pyspark def get_arrow_batch_size(UpperCAmelCase ): for batch in it: yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} ) __lowerCamelCase = self.df.count() __lowerCamelCase = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. __lowerCamelCase = ( self.df.limit(UpperCAmelCase ) .repartition(1 ) .mapInArrow(UpperCAmelCase , """batch_bytes: long""" ) .agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) ) .collect()[0] .sample_bytes / sample_num_rows ) __lowerCamelCase = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. __lowerCamelCase = min(UpperCAmelCase , int(approx_total_size / max_shard_size ) ) __lowerCamelCase = self.df.repartition(UpperCAmelCase ) def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ): import pyspark __lowerCamelCase = ParquetWriter if file_format == """parquet""" else ArrowWriter __lowerCamelCase = os.path.join(self._working_dir , os.path.basename(UpperCAmelCase ) ) if self._working_dir else fpath __lowerCamelCase = file_format == """parquet""" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. 
__lowerCamelCase = self.config.features __lowerCamelCase = self._writer_batch_size __lowerCamelCase = self._fs.storage_options def write_arrow(UpperCAmelCase ): # Within the same SparkContext, no two task attempts will share the same attempt ID. __lowerCamelCase = pyspark.TaskContext().taskAttemptId() __lowerCamelCase = next(UpperCAmelCase , UpperCAmelCase ) if first_batch is None: # Some partitions might not receive any data. return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) __lowerCamelCase = 0 __lowerCamelCase = writer_class( features=UpperCAmelCase , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=UpperCAmelCase , storage_options=UpperCAmelCase , embed_local_files=UpperCAmelCase , ) __lowerCamelCase = pa.Table.from_batches([first_batch] ) writer.write_table(UpperCAmelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: __lowerCamelCase , __lowerCamelCase = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) shard_id += 1 __lowerCamelCase = writer_class( features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=UpperCAmelCase , storage_options=UpperCAmelCase , embed_local_files=UpperCAmelCase , ) __lowerCamelCase = pa.Table.from_batches([batch] ) writer.write_table(UpperCAmelCase ) if writer._num_bytes > 0: __lowerCamelCase , __lowerCamelCase = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(UpperCAmelCase ) ): __lowerCamelCase = 
os.path.join(os.path.dirname(UpperCAmelCase ) , os.path.basename(UpperCAmelCase ) ) shutil.move(UpperCAmelCase , UpperCAmelCase ) __lowerCamelCase = ( self.df.mapInArrow(UpperCAmelCase , """task_id: long, num_examples: long, num_bytes: long""" ) .groupBy("""task_id""" ) .agg( pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCamelCase_ ( self , UpperCAmelCase , UpperCAmelCase = "arrow" , UpperCAmelCase = None , UpperCAmelCase = None , **UpperCAmelCase , ): self._validate_cache_dir() __lowerCamelCase = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(UpperCAmelCase ) __lowerCamelCase = not is_remote_filesystem(self._fs ) __lowerCamelCase = os.path.join if is_local else posixpath.join __lowerCamelCase = """-TTTTT-SSSSS-of-NNNNN""" __lowerCamelCase = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' __lowerCamelCase = path_join(self._output_dir , UpperCAmelCase ) __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = 0 __lowerCamelCase = [] __lowerCamelCase = [] for task_id, content in self._prepare_split_single(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(UpperCAmelCase ) __lowerCamelCase = total_num_examples __lowerCamelCase = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming 
{total_shards} shards.''' ) if total_shards > 1: __lowerCamelCase = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. __lowerCamelCase = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ): rename( UpperCAmelCase , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , ) __lowerCamelCase = [] __lowerCamelCase = 0 for i in range(len(UpperCAmelCase ) ): __lowerCamelCase , __lowerCamelCase = task_id_and_num_shards[i] for shard_id in range(UpperCAmelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(UpperCAmelCase , len(UpperCAmelCase ) ).map(lambda UpperCAmelCase : _rename_shard(*UpperCAmelCase ) ).collect() else: # don't use any pattern __lowerCamelCase = 0 __lowerCamelCase = task_id_and_num_shards[0][0] self._rename( fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(UpperCAmelCase , """""" ) , ) def lowerCamelCase_ ( self , UpperCAmelCase , ): return SparkExamplesIterable(self.df )
479
1
import json
import os
import unittest

from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Unit tests for ``MgpstrTokenizer``, driven by the common ``TokenizerTesterMixin``.

    Fix over the previous revision: the obfuscated code assigned every value to
    ``__UpperCAmelCase``/``a__`` while later statements read ``vocab_tokens``,
    ``self.vocab_file``, ``decoded`` etc., which raised ``NameError`` at runtime.
    Names are restored so assignments match their uses.
    """

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False  # MGP-STR ships no fast (Rust) tokenizer
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        """Write a minimal character vocabulary to a temp dir for from_pretrained()."""
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        """Return a tokenizer loaded from the temporary vocab written in setUp()."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Return an (input, expected decode) pair for the round-trip test."""
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        """A registered special token must encode to one id and be dropped on decode."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        """tokenize -> ids must match encode; decode must round-trip the text."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                # MGP-STR decodes without spaces between characters.
                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
708
"""simple docstring""" import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) _lowercase : Optional[Any] = logging.get_logger(__name__) _lowercase : Optional[Any] = OrderedDict( [ ('align', 'EfficientNetImageProcessor'), ('beit', 'BeitImageProcessor'), ('bit', 'BitImageProcessor'), ('blip', 'BlipImageProcessor'), ('blip-2', 'BlipImageProcessor'), ('bridgetower', 'BridgeTowerImageProcessor'), ('chinese_clip', 'ChineseCLIPImageProcessor'), ('clip', 'CLIPImageProcessor'), ('clipseg', 'ViTImageProcessor'), ('conditional_detr', 'ConditionalDetrImageProcessor'), ('convnext', 'ConvNextImageProcessor'), ('convnextv2', 'ConvNextImageProcessor'), ('cvt', 'ConvNextImageProcessor'), ('data2vec-vision', 'BeitImageProcessor'), ('deformable_detr', 'DeformableDetrImageProcessor'), ('deit', 'DeiTImageProcessor'), ('deta', 'DetaImageProcessor'), ('detr', 'DetrImageProcessor'), ('dinat', 'ViTImageProcessor'), ('donut-swin', 'DonutImageProcessor'), ('dpt', 'DPTImageProcessor'), ('efficientformer', 'EfficientFormerImageProcessor'), ('efficientnet', 'EfficientNetImageProcessor'), ('flava', 'FlavaImageProcessor'), ('focalnet', 'BitImageProcessor'), ('git', 'CLIPImageProcessor'), ('glpn', 'GLPNImageProcessor'), ('groupvit', 'CLIPImageProcessor'), ('imagegpt', 'ImageGPTImageProcessor'), ('instructblip', 'BlipImageProcessor'), ('layoutlmv2', 'LayoutLMv2ImageProcessor'), ('layoutlmv3', 'LayoutLMv3ImageProcessor'), ('levit', 
'LevitImageProcessor'), ('mask2former', 'Mask2FormerImageProcessor'), ('maskformer', 'MaskFormerImageProcessor'), ('mgp-str', 'ViTImageProcessor'), ('mobilenet_v1', 'MobileNetV1ImageProcessor'), ('mobilenet_v2', 'MobileNetV2ImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevit', 'MobileViTImageProcessor'), ('mobilevitv2', 'MobileViTImageProcessor'), ('nat', 'ViTImageProcessor'), ('oneformer', 'OneFormerImageProcessor'), ('owlvit', 'OwlViTImageProcessor'), ('perceiver', 'PerceiverImageProcessor'), ('pix2struct', 'Pix2StructImageProcessor'), ('poolformer', 'PoolFormerImageProcessor'), ('regnet', 'ConvNextImageProcessor'), ('resnet', 'ConvNextImageProcessor'), ('sam', 'SamImageProcessor'), ('segformer', 'SegformerImageProcessor'), ('swiftformer', 'ViTImageProcessor'), ('swin', 'ViTImageProcessor'), ('swin2sr', 'Swin2SRImageProcessor'), ('swinv2', 'ViTImageProcessor'), ('table-transformer', 'DetrImageProcessor'), ('timesformer', 'VideoMAEImageProcessor'), ('tvlt', 'TvltImageProcessor'), ('upernet', 'SegformerImageProcessor'), ('van', 'ConvNextImageProcessor'), ('videomae', 'VideoMAEImageProcessor'), ('vilt', 'ViltImageProcessor'), ('vit', 'ViTImageProcessor'), ('vit_hybrid', 'ViTHybridImageProcessor'), ('vit_mae', 'ViTImageProcessor'), ('vit_msn', 'ViTImageProcessor'), ('xclip', 'CLIPImageProcessor'), ('yolos', 'YolosImageProcessor'), ] ) _lowercase : int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def lowercase__ ( snake_case_ :str ): for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: __UpperCAmelCase = model_type_to_module_name(snake_case_ ) __UpperCAmelCase = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' ) try: return getattr(snake_case_ , snake_case_ ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(snake_case_ , '''__name__''' , snake_case_ ) == class_name: return extractor # We did 
not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. __UpperCAmelCase = importlib.import_module('''transformers''' ) if hasattr(snake_case_ , snake_case_ ): return getattr(snake_case_ , snake_case_ ) return None def lowercase__ ( snake_case_ :Union[str, os.PathLike] , snake_case_ :Optional[Union[str, os.PathLike]] = None , snake_case_ :bool = False , snake_case_ :bool = False , snake_case_ :Optional[Dict[str, str]] = None , snake_case_ :Optional[Union[bool, str]] = None , snake_case_ :Optional[str] = None , snake_case_ :bool = False , **snake_case_ :Optional[int] , ): __UpperCAmelCase = get_file_from_repo( snake_case_ , snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , resume_download=snake_case_ , proxies=snake_case_ , use_auth_token=snake_case_ , revision=snake_case_ , local_files_only=snake_case_ , ) if resolved_config_file is None: logger.info( '''Could not locate the image processor configuration file, will try to use the model config instead.''' ) return {} with open(snake_case_ , encoding='''utf-8''' ) as reader: return json.load(snake_case_ ) class _UpperCAmelCase : def __init__( self : Optional[int] ): raise EnvironmentError( '''AutoImageProcessor is designed to be instantiated ''' '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(_lowercase ) def a ( cls : Optional[Any] , _lowercase : int , **_lowercase : List[str] ): __UpperCAmelCase = kwargs.pop('''config''' , _lowercase ) __UpperCAmelCase = kwargs.pop('''trust_remote_code''' , _lowercase ) __UpperCAmelCase = True __UpperCAmelCase , __UpperCAmelCase = ImageProcessingMixin.get_image_processor_dict(_lowercase , **_lowercase ) __UpperCAmelCase = config_dict.get('''image_processor_type''' , _lowercase ) __UpperCAmelCase = None if "AutoImageProcessor" in 
config_dict.get('''auto_map''' , {} ): __UpperCAmelCase = config_dict['''auto_map''']['''AutoImageProcessor'''] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. if image_processor_class is None and image_processor_auto_map is None: __UpperCAmelCase = config_dict.pop('''feature_extractor_type''' , _lowercase ) if feature_extractor_class is not None: logger.warning( '''Could not find image processor class in the image processor config or the model config. Loading''' ''' based on pattern matching with the model\'s feature extractor configuration.''' ) __UpperCAmelCase = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' ) if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): __UpperCAmelCase = config_dict['''auto_map''']['''AutoFeatureExtractor'''] __UpperCAmelCase = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' ) logger.warning( '''Could not find image processor auto map in the image processor config or the model config.''' ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' ) # If we don't find the image processor class in the image processor config, let's try the model config. 
if image_processor_class is None and image_processor_auto_map is None: if not isinstance(_lowercase , _lowercase ): __UpperCAmelCase = AutoConfig.from_pretrained(_lowercase , **_lowercase ) # It could be in `config.image_processor_type`` __UpperCAmelCase = getattr(_lowercase , '''image_processor_type''' , _lowercase ) if hasattr(_lowercase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map: __UpperCAmelCase = config.auto_map['''AutoImageProcessor'''] if image_processor_class is not None: __UpperCAmelCase = image_processor_class_from_name(_lowercase ) __UpperCAmelCase = image_processor_auto_map is not None __UpperCAmelCase = image_processor_class is not None or type(_lowercase ) in IMAGE_PROCESSOR_MAPPING __UpperCAmelCase = resolve_trust_remote_code( _lowercase , _lowercase , _lowercase , _lowercase ) if has_remote_code and trust_remote_code: __UpperCAmelCase = get_class_from_dynamic_module( _lowercase , _lowercase , **_lowercase ) __UpperCAmelCase = kwargs.pop('''code_revision''' , _lowercase ) if os.path.isdir(_lowercase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(_lowercase , **_lowercase ) elif image_processor_class is not None: return image_processor_class.from_dict(_lowercase , **_lowercase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(_lowercase ) in IMAGE_PROCESSOR_MAPPING: __UpperCAmelCase = IMAGE_PROCESSOR_MAPPING[type(_lowercase )] return image_processor_class.from_dict(_lowercase , **_lowercase ) raise ValueError( F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a ''' F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following ''' F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def a ( _lowercase : Union[str, Any] , _lowercase : Tuple ): IMAGE_PROCESSOR_MAPPING.register(_lowercase , _lowercase )
397
0
"""Audio diffusion pipeline: generates mel-spectrogram images with a UNet
(optionally through a VQ-VAE latent space) and converts them to audio.

Fixes over the previous revision: results were assigned to ``__snake_case``
while later statements read ``images``, ``mask``, ``alpha_prod_t`` etc.
(guaranteed ``NameError``); the UNet class name had been mangled to
``UNetaDConditionModel`` (no such class in diffusers); and the in-place mask
write-back had lost its sliced-assignment target.
"""
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion over mel spectrogram images."""

    # The VQ-VAE is optional: without it the UNet denoises raw spectrograms.
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ) -> None:
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of inference steps; DDIM needs far fewer than DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,  # noqa: A002 - shadows builtin, kept for API compatibility
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Denoise noise (optionally seeded from an input audio slice) into
        spectrogram images and the corresponding audio.

        Returns a combined Audio/Image pipeline output, or, when
        ``return_dict=False``, ``(images, (sample_rate, audios))``.
        """
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            # Rescale 8-bit grayscale to [-1, 1] for the diffusion model.
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                # Pin the masked edges of the spectrogram to the (noised) input.
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse-DDIM: map spectrogram images back to noise (DDIM only)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors, flattened."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
56
'''simple docstring''' from __future__ import annotations import math def _a (lowercase__ : int ) -> bool: """simple docstring""" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True _a : Dict = [num for num in range(3, 100_001, 2) if not is_prime(num)] def _a (lowercase__ : int ) -> list[int]: """simple docstring""" if not isinstance(lowercase__ , lowercase__ ): raise ValueError('n must be an integer' ) if n <= 0: raise ValueError('n must be >= 0' ) __snake_case = [] for num in range(len(lowercase__ ) ): __snake_case = 0 while 2 * i * i <= odd_composites[num]: __snake_case = odd_composites[num] - 2 * i * i if is_prime(lowercase__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(lowercase__ ) == n: return list_nums return [] def _a () -> int: """simple docstring""" return compute_nums(1 )[0] if __name__ == "__main__": print(f'''{solution() = }''')
56
1
"""simple docstring""" from manim import * class __snake_case( A_ ): '''simple docstring''' def _a ( self ): '''simple docstring''' __A : int = Rectangle(height=0.5 , width=0.5 ) __A : str = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) __A : Any = Rectangle(height=0.25 , width=0.25 ) __A : Optional[int] = [mem.copy() for i in range(6 )] __A : Optional[int] = [mem.copy() for i in range(6 )] __A : Optional[Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) __A : Union[str, Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) __A : List[str] = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) __A : List[str] = Text('CPU' , font_size=24 ) __A : List[str] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__lowerCamelCase ) __A : Dict = [mem.copy() for i in range(4 )] __A : Tuple = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) __A : Optional[int] = Text('GPU' , font_size=24 ) __A : Optional[Any] = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) gpu.move_to([-1, -1, 0] ) self.add(__lowerCamelCase ) __A : Union[str, Any] = [mem.copy() for i in range(6 )] __A : Tuple = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) __A : Optional[Any] = Text('Model' , font_size=24 ) __A : str = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) model.move_to([3, -1.0, 0] ) self.add(__lowerCamelCase ) __A : Any = [] __A : Union[str, Any] = [] for i, rect in enumerate(__lowerCamelCase ): __A : str = fill.copy().set_fill(__lowerCamelCase , opacity=0.8 ) target.move_to(__lowerCamelCase ) model_arr.append(__lowerCamelCase ) __A : List[Any] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__lowerCamelCase , opacity=0.8 ) 
cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(__lowerCamelCase ) self.add(*__lowerCamelCase , *__lowerCamelCase ) __A : Union[str, Any] = [meta_mem.copy() for i in range(6 )] __A : int = [meta_mem.copy() for i in range(6 )] __A : Dict = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) __A : Optional[Any] = VGroup(*__lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) __A : List[Any] = VGroup(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0 ) __A : Optional[Any] = Text('Disk' , font_size=24 ) __A : int = Group(__lowerCamelCase , __lowerCamelCase ).arrange(__lowerCamelCase , buff=0.5 , aligned_edge=__lowerCamelCase ) disk.move_to([-4, -1.25, 0] ) self.add(__lowerCamelCase , __lowerCamelCase ) __A : Union[str, Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __A : int = MarkupText( F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__lowerCamelCase , __lowerCamelCase ) __A : int = MarkupText( F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , ) blue_text.next_to(__lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__lowerCamelCase ) __A : Optional[Any] = MarkupText( F'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCamelCase ) ) __A : int = Square(0.3 ) input.set_fill(__lowerCamelCase , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , __lowerCamelCase , buff=0.5 ) self.play(Write(__lowerCamelCase ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=__lowerCamelCase , buff=0.02 ) self.play(MoveToTarget(__lowerCamelCase ) ) self.play(FadeOut(__lowerCamelCase ) ) __A : Optional[Any] = Arrow(start=__lowerCamelCase , end=__lowerCamelCase , color=__lowerCamelCase , buff=0.5 ) a.next_to(model_arr[0].get_left() , __lowerCamelCase , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) __A : List[str] = MarkupText( F'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCamelCase , run_time=3 ) ) __A : int = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02} self.play( Write(__lowerCamelCase ) , Circumscribe(model_arr[0] , color=__lowerCamelCase , **__lowerCamelCase ) , Circumscribe(model_cpu_arr[0] , color=__lowerCamelCase , **__lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCamelCase , **__lowerCamelCase ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) __A : Dict = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.02 , __lowerCamelCase , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.02 ) __A : Any = AnimationGroup( FadeOut(__lowerCamelCase , run_time=0.5 ) , MoveToTarget(__lowerCamelCase , run_time=0.5 ) , FadeIn(__lowerCamelCase , run_time=0.5 ) , lag_ratio=0.2 ) self.play(__lowerCamelCase ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: __A : List[Any] = 0.7 self.play( 
Circumscribe(model_arr[i] , **__lowerCamelCase ) , Circumscribe(cpu_left_col_base[i] , **__lowerCamelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=__lowerCamelCase , **__lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCamelCase , **__lowerCamelCase ) , Circumscribe(model_arr[i + 1] , color=__lowerCamelCase , **__lowerCamelCase ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=__lowerCamelCase , **__lowerCamelCase ) , Circumscribe(cpu_left_col_base[-1] , color=__lowerCamelCase , **__lowerCamelCase ) , Circumscribe(gpu_rect[0] , color=__lowerCamelCase , **__lowerCamelCase ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) __A : Optional[int] = a_c __A : Dict = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 ) self.play( FadeOut(__lowerCamelCase ) , FadeOut(__lowerCamelCase , run_time=0.5 ) , ) __A : Tuple = MarkupText(F'Inference on a model too large for GPU memory\nis successfully completed.' , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__lowerCamelCase , run_time=3 ) , MoveToTarget(__lowerCamelCase ) ) self.wait()
237
"""simple docstring""" import os from datetime import datetime as dt from github import Github lowerCamelCase : Dict =[ '''good first issue''', '''good second issue''', '''good difficult issue''', '''enhancement''', '''new pipeline/model''', '''new scheduler''', '''wip''', ] def _lowercase ( ) -> Optional[Any]: '''simple docstring''' __A : Union[str, Any] = Github(os.environ['GITHUB_TOKEN'] ) __A : Union[str, Any] = g.get_repo('huggingface/diffusers' ) __A : Optional[int] = repo.get_issues(state='open' ) for issue in open_issues: __A : Any = sorted(issue.get_comments() , key=lambda _SCREAMING_SNAKE_CASE : i.created_at , reverse=_SCREAMING_SNAKE_CASE ) __A : Optional[int] = comments[0] if len(_SCREAMING_SNAKE_CASE ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='closed' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='open' ) issue.remove_from_labels('stale' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' 
) issue.add_to_labels('stale' ) if __name__ == "__main__": main()
237
1
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer

from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
    RobertaSeriesConfig,
    RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class AltDiffusionPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast (CPU, tiny-model) tests for `AltDiffusionPipeline`.

    NOTE(review): the obfuscated original declared every method under the same
    name and referenced parameters that did not exist in scope; this rewrite
    restores consistent names so the class is actually runnable.
    """

    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a dict of tiny, seeded components for a fast pipeline."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests);
        # a RobertaSeries text encoder is substituted per-test instead (see below).
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        # presumably this 77 was assigned to tokenizer.model_max_length (CLIP-style
        # context window) — the obfuscated source lost the assignment target; TODO confirm.
        tokenizer.model_max_length = 77

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return a minimal call-kwargs dict with a device-appropriate generator."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder

        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the public BAAI/AltDiffusion checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
107
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """Output of `UNet1DModel.forward`.

    Attributes:
        sample: the model output tensor (same leading batch dimension as the input).
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1-D UNet over (batch, channels, length) samples.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    under the same placeholder name (a SyntaxError); this rewrite restores a
    consistent, self-referencing signature.
    """

    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time projection: Fourier features (fixed random projection) or
        # positional sinusoidal embeddings
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down: first block optionally receives extra conditioning channels
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up: mirror of the down path; last block's width depends on whether a
        # dedicated out block follows
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(up_block_types) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        """Run the UNet.

        Args:
            sample: noisy input tensor, indexed as (batch, channel, length)
                per the `sample.shape[2]` broadcast below.
            timestep: scalar or per-batch diffusion timestep(s).
            return_dict: when False, return a plain ``(sample,)`` tuple.
        """
        # 1. time — normalize the timestep to a 1-D tensor on sample's device
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            # without an MLP, tile the raw projection across the length axis
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up — consume residuals in reverse order
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample)
562
0
"""DPT model configuration."""
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}


class DPTConfig(PretrainedConfig):
    """Configuration for a DPT (Dense Prediction Transformer) model.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    under the same placeholder name (a SyntaxError); this rewrite restores the
    parameter names implied by the attribute assignments in the body.
    """

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            # hybrid mode uses a BiT convolutional backbone
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
331
"""Fine-tune OpenAI GPT (double-heads) on the ROCStories cloze task."""
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    """Count correct argmax predictions (not a ratio — caller divides)."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Load a ROCStories CSV as (story, cont1, cont2, label) tuples; label is 0-based."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the header line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pack each (story, cont1, cont2, label) into fixed-size token tensors.

    Each example yields two candidate sequences:
        [start] story [delimiter] continuation [clf]
    LM labels are -100 (ignored) outside the real tokens; mc_token_ids point at
    the classification token of each candidate.
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Recursively tokenize strings; pass integers (labels) through."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                # combined objective: weighted LM loss + multiple-choice loss
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()
331
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor __a: Tuple = logging.get_logger(__name__) class UpperCAmelCase ( a__ ): '''simple docstring''' def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ) -> None: warnings.warn( '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DonutImageProcessor instead.''' , __lowerCAmelCase , ) super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
152
"""Tokenizer tests for LayoutLM (WordPiece-based, BERT-style)."""
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Exercises the slow and fast LayoutLM tokenizers against a tiny vocab."""

    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        # minimal WordPiece vocab: index order matters for the id assertions below
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # accented input should be lower-cased and accent-stripped
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        # intentionally empty placeholder inherited from the common test layout
        pass
1
'''simple docstring''' import math def _lowercase ( UpperCamelCase__ : Dict ): __A : int = [] __A : Optional[int] = 2 __A : List[str] = int(math.sqrt(UpperCAmelCase__ ) ) # Size of every segment __A : Dict = [True] * (end + 1) __A : int = [] while start <= end: if temp[start] is True: in_prime.append(UpperCAmelCase__ ) for i in range(start * start, end + 1, UpperCAmelCase__ ): __A : Dict = False start += 1 prime += in_prime __A : str = end + 1 __A : int = min(2 * end, UpperCAmelCase__ ) while low <= n: __A : int = [True] * (high - low + 1) for each in in_prime: __A : List[Any] = math.floor(low / each ) * each if t < low: t += each for j in range(UpperCAmelCase__, high + 1, UpperCAmelCase__ ): __A : Any = False for j in range(len(UpperCAmelCase__ ) ): if temp[j] is True: prime.append(j + low ) __A : str = high + 1 __A : Optional[Any] = min(high + end, UpperCAmelCase__ ) return prime print(sieve(1_0**6))
714
'''simple docstring''' from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class _lowerCamelCase : '''simple docstring''' def __init__( self , __lowercase = None ): """simple docstring""" if components is None: __A : Optional[int] = [] __A : Dict = list(__lowercase ) def __len__( self ): """simple docstring""" return len(self.__components ) def __str__( self ): """simple docstring""" return "(" + ",".join(map(__lowercase , self.__components ) ) + ")" def __add__( self , __lowercase ): """simple docstring""" __A : Union[str, Any] = len(self ) if size == len(__lowercase ): __A : Dict = [self.__components[i] + other.component(__lowercase ) for i in range(__lowercase )] return Vector(__lowercase ) else: raise Exception('must have the same size' ) def __sub__( self , __lowercase ): """simple docstring""" __A : int = len(self ) if size == len(__lowercase ): __A : str = [self.__components[i] - other.component(__lowercase ) for i in range(__lowercase )] return Vector(__lowercase ) else: # error case raise Exception('must have the same size' ) @overload def __mul__( self , __lowercase ): """simple docstring""" ... @overload def __mul__( self , __lowercase ): """simple docstring""" ... def __mul__( self , __lowercase ): """simple docstring""" if isinstance(__lowercase , (float, int) ): __A : Dict = [c * other for c in self.__components] return Vector(__lowercase ) elif isinstance(__lowercase , __lowercase ) and len(self ) == len(__lowercase ): __A : Optional[Any] = len(self ) __A : Optional[int] = [self.__components[i] * other.component(__lowercase ) for i in range(__lowercase )] return sum(__lowercase ) else: # error case raise Exception('invalid operand!' 
) def snake_case__ ( self ): """simple docstring""" return Vector(self.__components ) def snake_case__ ( self , __lowercase ): """simple docstring""" if isinstance(__lowercase , __lowercase ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('index out of range' ) def snake_case__ ( self , __lowercase , __lowercase ): """simple docstring""" assert -len(self.__components ) <= pos < len(self.__components ) __A : str = value def snake_case__ ( self ): """simple docstring""" if len(self.__components ) == 0: raise Exception('Vector is empty' ) __A : List[Any] = [c**2 for c in self.__components] return math.sqrt(sum(__lowercase ) ) def snake_case__ ( self , __lowercase , __lowercase = False ): """simple docstring""" __A : List[Any] = self * other __A : Any = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def _lowercase ( UpperCamelCase__ : int ): assert isinstance(UpperCamelCase__, UpperCamelCase__ ) return Vector([0] * dimension ) def _lowercase ( UpperCamelCase__ : int, UpperCamelCase__ : int ): assert isinstance(UpperCamelCase__, UpperCamelCase__ ) and (isinstance(UpperCamelCase__, UpperCamelCase__ )) __A : List[str] = [0] * dimension __A : Tuple = 1 return Vector(UpperCamelCase__ ) def _lowercase ( UpperCamelCase__ : float, UpperCamelCase__ : Vector, UpperCamelCase__ : Vector ): assert ( isinstance(UpperCamelCase__, UpperCamelCase__ ) and isinstance(UpperCamelCase__, UpperCamelCase__ ) and (isinstance(UpperCamelCase__, (int, float) )) ) return x * scalar + y def _lowercase ( UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ): random.seed(UpperCamelCase__ ) __A : Any = [random.randint(UpperCamelCase__, UpperCamelCase__ ) for _ in range(UpperCamelCase__ )] return Vector(UpperCamelCase__ ) class _lowerCamelCase : '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase ): 
"""simple docstring""" __A : str = matrix __A : str = w __A : Dict = h def __str__( self ): """simple docstring""" __A : Tuple = '' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , __lowercase ): """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __A : Union[str, Any] = [] for i in range(self.__height ): __A : List[Any] = [ self.__matrix[i][j] + other.component(__lowercase , __lowercase ) for j in range(self.__width ) ] matrix.append(__lowercase ) return Matrix(__lowercase , self.__width , self.__height ) else: raise Exception('matrix must have the same dimension!' ) def __sub__( self , __lowercase ): """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __A : Dict = [] for i in range(self.__height ): __A : str = [ self.__matrix[i][j] - other.component(__lowercase , __lowercase ) for j in range(self.__width ) ] matrix.append(__lowercase ) return Matrix(__lowercase , self.__width , self.__height ) else: raise Exception('matrices must have the same dimension!' ) @overload def __mul__( self , __lowercase ): """simple docstring""" ... @overload def __mul__( self , __lowercase ): """simple docstring""" ... def __mul__( self , __lowercase ): """simple docstring""" if isinstance(__lowercase , __lowercase ): # matrix-vector if len(__lowercase ) == self.__width: __A : Tuple = zero_vector(self.__height ) for i in range(self.__height ): __A : Any = [ self.__matrix[i][j] * other.component(__lowercase ) for j in range(self.__width ) ] ans.change_component(__lowercase , sum(__lowercase ) ) return ans else: raise Exception( 'vector must have the same size as the ' 'number of columns of the matrix!' 
) elif isinstance(__lowercase , (int, float) ): # matrix-scalar __A : Optional[Any] = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(__lowercase , self.__width , self.__height ) return None def snake_case__ ( self ): """simple docstring""" return self.__height def snake_case__ ( self ): """simple docstring""" return self.__width def snake_case__ ( self , __lowercase , __lowercase ): """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('change_component: indices out of bounds' ) def snake_case__ ( self , __lowercase , __lowercase , __lowercase ): """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __A : Dict = value else: raise Exception('change_component: indices out of bounds' ) def snake_case__ ( self , __lowercase , __lowercase ): """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square' ) __A : Union[str, Any] = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__lowercase ) ): __A : Dict = minor[i][:y] + minor[i][y + 1 :] return Matrix(__lowercase , self.__width - 1 , self.__height - 1 ).determinant() def snake_case__ ( self , __lowercase , __lowercase ): """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square' ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__lowercase , __lowercase ) else: raise Exception('Indices out of bounds' ) def snake_case__ ( self ): """simple docstring""" if self.__height != self.__width: raise Exception('Matrix is not square' ) if self.__height < 1: raise Exception('Matrix has no element' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __A : List[str] = [ self.__matrix[0][y] * self.cofactor(0 , __lowercase ) for y in 
range(self.__width ) ] return sum(__lowercase ) def _lowercase ( UpperCamelCase__ : int ): __A : list[list[float]] = [[0] * n for _ in range(UpperCamelCase__ )] return Matrix(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ) def _lowercase ( UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : int ): random.seed(UpperCamelCase__ ) __A : list[list[float]] = [ [random.randint(UpperCamelCase__, UpperCamelCase__ ) for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ ) ] return Matrix(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
540
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class A__ ( unittest.TestCase ): def __UpperCamelCase ( self : int ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE =tempfile.mkdtemp() _SCREAMING_SNAKE_CASE =[ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] _SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) _SCREAMING_SNAKE_CASE ={ '''do_resize''': True, '''size''': {'''height''': 224, '''width''': 224}, '''do_center_crop''': True, '''crop_size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73], '''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11], '''do_convert_rgb''': True, } _SCREAMING_SNAKE_CASE =os.path.join(self.tmpdirname , _a ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(_a , _a ) def __UpperCamelCase ( self : Optional[int] , **_a : str ) -> List[str]: """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **_a ) def __UpperCamelCase ( self : List[Any] , **_a : Any ) -> Dict: """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a ) def __UpperCamelCase ( self : int , **_a : Optional[Any] ) 
-> Any: """simple docstring""" return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **_a ) def __UpperCamelCase ( self : str ) -> Union[str, Any]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def __UpperCamelCase ( self : int ) -> Optional[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs] return image_inputs def __UpperCamelCase ( self : Any ) -> List[Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =self.get_tokenizer() _SCREAMING_SNAKE_CASE =self.get_rust_tokenizer() _SCREAMING_SNAKE_CASE =self.get_image_processor() _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a ) processor_slow.save_pretrained(self.tmpdirname ) _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=_a ) _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a ) processor_fast.save_pretrained(self.tmpdirname ) _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _a ) self.assertIsInstance(processor_fast.tokenizer , _a ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _a ) self.assertIsInstance(processor_fast.image_processor , _a ) def __UpperCamelCase ( self : str ) -> Union[str, Any]: """simple docstring""" _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , 
image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _SCREAMING_SNAKE_CASE =self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' ) _SCREAMING_SNAKE_CASE =self.get_image_processor(do_normalize=_a ) _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=_a ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _a ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _a ) def __UpperCamelCase ( self : List[Any] ) -> Tuple: """simple docstring""" _SCREAMING_SNAKE_CASE =self.get_image_processor() _SCREAMING_SNAKE_CASE =self.get_tokenizer() _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a ) _SCREAMING_SNAKE_CASE =self.prepare_image_inputs() _SCREAMING_SNAKE_CASE =image_processor(_a , return_tensors='''np''' ) _SCREAMING_SNAKE_CASE =processor(images=_a , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __UpperCamelCase ( self : Union[str, Any] ) -> Any: """simple docstring""" _SCREAMING_SNAKE_CASE =self.get_image_processor() _SCREAMING_SNAKE_CASE =self.get_tokenizer() _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a ) _SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。''' _SCREAMING_SNAKE_CASE =processor(text=_a ) _SCREAMING_SNAKE_CASE =tokenizer(_a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __UpperCamelCase ( self : Tuple ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE =self.get_image_processor() _SCREAMING_SNAKE_CASE =self.get_tokenizer() _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a ) 
_SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。''' _SCREAMING_SNAKE_CASE =self.prepare_image_inputs() _SCREAMING_SNAKE_CASE =processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(_a ): processor() def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" _SCREAMING_SNAKE_CASE =self.get_image_processor() _SCREAMING_SNAKE_CASE =self.get_tokenizer() _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a ) _SCREAMING_SNAKE_CASE =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _SCREAMING_SNAKE_CASE =processor.batch_decode(_a ) _SCREAMING_SNAKE_CASE =tokenizer.batch_decode(_a ) self.assertListEqual(_a , _a ) def __UpperCamelCase ( self : Any ) -> List[str]: """simple docstring""" _SCREAMING_SNAKE_CASE =self.get_image_processor() _SCREAMING_SNAKE_CASE =self.get_tokenizer() _SCREAMING_SNAKE_CASE =ChineseCLIPProcessor(tokenizer=_a , image_processor=_a ) _SCREAMING_SNAKE_CASE ='''Alexandra,T-shirt的价格是15便士。''' _SCREAMING_SNAKE_CASE =self.prepare_image_inputs() _SCREAMING_SNAKE_CASE =processor(text=_a , images=_a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
691
from manim import * class A__ ( UpperCamelCase__ ): def __UpperCamelCase ( self : Dict ) -> int: """simple docstring""" _SCREAMING_SNAKE_CASE =Rectangle(height=0.5 , width=0.5 ) _SCREAMING_SNAKE_CASE =Rectangle(height=0.25 , width=0.25 ) _SCREAMING_SNAKE_CASE =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) _SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 ) _SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 ) _SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 ) _SCREAMING_SNAKE_CASE =Text('''CPU''' , font_size=24 ) _SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) cpu.move_to([-2.5, -0.5, 0] ) self.add(_a ) _SCREAMING_SNAKE_CASE =[mem.copy() for i in range(4 )] _SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 ) _SCREAMING_SNAKE_CASE =Text('''GPU''' , font_size=24 ) _SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) gpu.move_to([-1, -1, 0] ) self.add(_a ) _SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 ) _SCREAMING_SNAKE_CASE =Text('''Model''' , font_size=24 ) _SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) model.move_to([3, -1.0, 0] ) self.add(_a ) _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =[] for i, rect in enumerate(_a ): rect.set_stroke(_a ) _SCREAMING_SNAKE_CASE =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_a , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_a ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=_a , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=_a , buff=0.0 ) self.add(_a ) model_cpu_arr.append(_a ) self.add(*_a , *_a , *_a ) 
_SCREAMING_SNAKE_CASE =[mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 ) _SCREAMING_SNAKE_CASE =Text('''Loaded Checkpoint''' , font_size=24 ) _SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) checkpoint.move_to([3, 0.5, 0] ) self.add(_a ) _SCREAMING_SNAKE_CASE =[] _SCREAMING_SNAKE_CASE =[] for i, rect in enumerate(_a ): _SCREAMING_SNAKE_CASE =fill.copy().set_fill(_a , opacity=0.7 ) target.move_to(_a ) ckpt_arr.append(_a ) _SCREAMING_SNAKE_CASE =target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(_a ) self.add(*_a , *_a ) _SCREAMING_SNAKE_CASE =Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) _SCREAMING_SNAKE_CASE =MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(_a , _a ) _SCREAMING_SNAKE_CASE =MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , ) blue_text.next_to(_a , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(_a ) _SCREAMING_SNAKE_CASE =MarkupText( f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." 
, font_size=24 , ) step_a.move_to([2, 2, 0] ) _SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE =[meta_mem.copy() for i in range(6 )] _SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 ) _SCREAMING_SNAKE_CASE =VGroup(*_a ).arrange(_a , buff=0 ) _SCREAMING_SNAKE_CASE =VGroup(_a , _a ).arrange(_a , buff=0 ) _SCREAMING_SNAKE_CASE =Text('''Disk''' , font_size=24 ) _SCREAMING_SNAKE_CASE =Group(_a , _a ).arrange(_a , buff=0.5 , aligned_edge=_a ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(_a , run_time=3 ) , Write(_a , run_time=1 ) , Create(_a , run_time=1 ) ) _SCREAMING_SNAKE_CASE =[] for i, rect in enumerate(_a ): _SCREAMING_SNAKE_CASE =rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(_a , run_time=1.5 ) ) self.play(*_a ) self.play(FadeOut(_a ) ) _SCREAMING_SNAKE_CASE =MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(_a , run_time=3 ) ) self.play( FadeOut(_a , _a , *_a , *_a ) , ) self.wait()
691
1
def triangle_number_generator():
    """Yield the triangle numbers 1, 3, 6, 10, ... in increasing order.

    The n-th triangle number is n * (n + 1) / 2.
    """
    for n in range(1, 1_000_000):
        yield n * (n + 1) // 2


def count_divisors(a):
    """Return the number of positive divisors of ``a`` by trial division.

    Factorises ``a`` and multiplies together (multiplicity + 1) over all
    prime factors.
    """
    divisors_count = 1
    i = 2
    while i * i <= a:
        multiplicity = 0
        while a % i == 0:
            a //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if a > 1:
        # Whatever remains above 1 is a prime factor with multiplicity 1.
        divisors_count *= 2
    return divisors_count


def solution():
    """Return the first triangle number with more than 500 divisors.

    Project Euler problem 12.
    """
    # NOTE(review): the mangled original named all three functions ``A_`` but
    # called ``triangle_number_generator``/``count_divisors``/``solution``;
    # the names are restored here so the module actually runs.
    return next(
        i for i in triangle_number_generator() if count_divisors(i) > 500
    )


if __name__ == "__main__":
    print(solution())
353
from ..utils import DummyObject, requires_backends class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : Optional[int] = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : List[str] = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : Any = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : Optional[Any] = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def 
UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : int = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : List[str] = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : int = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : Any = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple 
docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : Optional[Any] = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : Dict = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : Any = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : Union[str, Any] = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def 
UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) class _A ( metaclass=__magic_name__): SCREAMING_SNAKE_CASE : Union[str, Any] = ['''flax'''] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(self , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] ) @classmethod def UpperCAmelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): """simple docstring""" requires_backends(cls , ['flax'] )
353
1
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX


def linear_regression_prediction(train_dt, train_usr, train_mtch, test_dt, test_mtch):
    """Forecast the next value with an ordinary-least-squares fit.

    Builds the design matrix [1, date, match_count], solves the normal
    equations beta = (X^T X)^-1 X^T y, and returns the absolute prediction
    for the held-out day.
    """
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    # NOTE(review): ``test_mtch[0] + beta[2]`` (not ``*``) is what the
    # original computed; preserved as-is, but a multiplication may have
    # been intended — confirm against the model definition.
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])


def sarimax_predictor(train_user, train_match, test_match):
    """Forecast the next value with a SARIMAX model.

    ``train_match`` is used as the exogenous regressor; the seasonal period
    of 7 assumes weekly seasonality.
    """
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]


def support_vector_regressor(x_train, x_test, train_user):
    """Forecast the next value with an RBF-kernel support vector regressor."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]


def interquartile_range_checker(train_user):
    """Return a lower outlier limit derived from the interquartile range.

    Sorts ``train_user`` in place as a side effect.
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim


def data_safety_checker(list_vote, actual_result):
    """Vote on whether ``actual_result`` agrees with the forecasts.

    A forecast within 0.1 (in absolute value) of the actual result counts
    as "safe"; returns True when safe votes outnumber unsafe ones.
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe


if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data: compare against the scalar value,
    # not the one-element list (comparing float > list raised TypeError).
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    # Bug fix: the original used a plain string, printing the literal
    # text "{not_str}safe." instead of interpolating.
    print(f"Today's data is {not_str}safe.")
658
def is_balanced(s):
    """Return True if every bracket in ``s`` is opened and closed in order.

    Supports the three bracket pairs (), [] and {}; all other characters
    are ignored.
    """
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for char in s:
        if char in open_brackets:
            stack.append(char)
        elif char in closed_brackets and (
            # Closing bracket with nothing open, or a mismatched pair.
            not stack or open_to_closed[stack.pop()] != char
        ):
            return False

    # Balanced only if every opened bracket was closed.
    return not stack


def main():
    """Read a bracket sequence from stdin and report whether it is balanced."""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
112
0
from __future__ import annotations

from typing import Any


def SCREAMING_SNAKE_CASE(postfix_notation: list[str]) -> int:
    """Evaluate an integer expression given in reverse Polish notation.

    Each token is either an operator (+, -, *, /) or an integer literal.
    Division truncates toward zero (C semantics), not toward negative
    infinity. An empty token list evaluates to 0.
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            # Bug fix: the mangled original assigned both pops to the same
            # name, leaving ``a`` and ``b`` undefined. The top of the stack
            # is the *second* operand.
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # Emulate truncation toward zero on top of floor division.
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
716
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR scene-text recognition.

    Every character of the input text becomes one token; ids are looked up
    in the JSON vocabulary file.
    """

    # NOTE(review): the mangled original named the base class, all methods
    # and the module constants with identical placeholders, so later defs
    # clobbered earlier ones; names are restored to the PreTrainedTokenizer
    # hook API (_tokenize, _convert_token_to_id, ...) — verify against the
    # upstream transformers file.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping for id -> token lookups.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        """Number of entries in the base vocabulary."""
        return len(self.vocab)

    def get_vocab(self):
        """Return the full vocabulary including added tokens."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Split ``text`` into single-character tokens."""
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        """Map a token to its id, falling back to the unk token's id."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token (None if unknown)."""
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary JSON into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
105
0
from typing import Any


class Node:
    """A single node of a singly linked list."""

    def __init__(self, data: Any):
        self.data = data  # payload stored in this node
        self.next = None  # next node, or None at the tail


class LinkedList:
    """A minimal singly linked list supporting push and data swapping."""

    def __init__(self):
        self.head = None  # first node, or None when the list is empty

    def print_list(self):
        """Print all node values on one line, separated by spaces."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        """Insert ``new_data`` at the head of the list."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """Swap the payloads of the first nodes holding the two given values.

        Does nothing when the two values are equal or either is absent.
        """
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        # Swap the data only; the node links stay untouched.
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
1
class __UpperCAmelCase : """simple docstring""" def __init__( self , __A ): __a = set_counts __a = max(__A ) __a = len(__A ) __a = [1] * num_sets __a = list(range(__A ) ) def snake_case_ ( self , __A , __A ): __a = self.get_parent(__A ) __a = self.get_parent(__A ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] __a = 0 __a = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 __a = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] __a = 0 __a = src_parent __a = self.set_counts[src_parent] __a = max(self.max_set , __A ) return True def snake_case_ ( self , __A ): if self.parents[disj_set] == disj_set: return disj_set __a = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
99
0
'''simple docstring''' from math import factorial def lowercase__ ( __UpperCamelCase = 20 )-> int: UpperCamelCase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... UpperCamelCase = n // 2 return int(factorial(__UpperCamelCase ) / (factorial(__UpperCamelCase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(2_0)) else: try: SCREAMING_SNAKE_CASE__ = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number.')
35
'''simple docstring''' from __future__ import annotations def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> dict[str, float]: if (voltage, current, resistance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance < 0: raise ValueError("""Resistance cannot be negative""" ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
35
1
import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def __lowerCAmelCase ( __magic_name__=None , __magic_name__=None ): return field(default_factory=lambda: default , metadata=SCREAMING_SNAKE_CASE__ ) @dataclass class A : '''simple docstring''' lowerCamelCase : str = field( metadata={"""help""": """The csv file to plot."""} , ) lowerCamelCase : bool = field( default=lowerCamelCase_ , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , ) lowerCamelCase : bool = field( default=lowerCamelCase_ , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , ) lowerCamelCase : bool = field( default=lowerCamelCase_ , metadata={"""help""": """Disable logarithmic scale when plotting"""} , ) lowerCamelCase : bool = field( default=lowerCamelCase_ , metadata={ """help""": """Whether the csv file has training results or inference results. Defaults to inference results.""" } , ) lowerCamelCase : Optional[str] = field( default=lowerCamelCase_ , metadata={"""help""": """Filename under which the plot will be saved. 
If unused no plot is saved."""} , ) lowerCamelCase : Optional[List[str]] = list_field( default=lowerCamelCase_ , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} ) def __lowerCAmelCase ( __magic_name__ ): try: int(SCREAMING_SNAKE_CASE__ ) return True except ValueError: return False def __lowerCAmelCase ( __magic_name__ ): try: float(SCREAMING_SNAKE_CASE__ ) return True except ValueError: return False class A : '''simple docstring''' def __init__( self : int , _UpperCamelCase : Union[str, Any]): _lowercase: Union[str, Any] = args _lowercase: Tuple = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}}) with open(self.args.csv_file , newline="") as csv_file: _lowercase: int = csv.DictReader(_UpperCamelCase) for row in reader: _lowercase: int = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["batch_size"])) self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"])) if can_convert_to_int(row["result"]): # value is not None _lowercase: str = int(row["result"]) elif can_convert_to_float(row["result"]): # value is not None _lowercase: List[str] = float(row["result"]) def UpperCAmelCase__ ( self : List[Any]): _lowercase: List[Any] = plt.subplots() _lowercase: Any = """Time usage""" if self.args.is_time else """Memory usage""" _lowercase: Union[str, Any] = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("log") ax.set_yscale("log") for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter()) for model_name_idx, model_name in enumerate(self.result_dict.keys()): _lowercase: str = sorted(set(self.result_dict[model_name]["bsz"])) _lowercase: Optional[int] = sorted(set(self.result_dict[model_name]["seq_len"])) _lowercase: Union[str, Any] = self.result_dict[model_name]["""result"""] (_lowercase): int = ( (batch_sizes, sequence_lengths) if 
self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) _lowercase: int = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: _lowercase: str = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_UpperCamelCase , ) else: _lowercase: Tuple = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) (_lowercase): str = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) _lowercase: int = np.asarray(_UpperCamelCase , _UpperCamelCase)[: len(_UpperCamelCase)] plt.scatter( _UpperCamelCase , _UpperCamelCase , label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}") plt.plot(_UpperCamelCase , _UpperCamelCase , "--") title_str += f" {label_model_name} vs." _lowercase: Tuple = title_str[:-4] _lowercase: Tuple = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(_UpperCamelCase) plt.xlabel(_UpperCamelCase) plt.ylabel(_UpperCamelCase) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file) else: plt.show() def __lowerCAmelCase ( ): _lowercase: List[str] = HfArgumentParser(SCREAMING_SNAKE_CASE__ ) _lowercase: List[str] = parser.parse_args_into_dataclasses()[0] _lowercase: Dict = Plot(args=SCREAMING_SNAKE_CASE__ ) plot.plot() if __name__ == "__main__": main()
226
import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin lowercase : Tuple = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right lowercase : Union[str, Any] = 50003 lowercase : Optional[int] = 50002 @require_sentencepiece @require_tokenizers class __snake_case ( lowerCAmelCase , unittest.TestCase ): _a : List[str]= PLBartTokenizer _a : List[str]= None _a : str= False def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing lowercase : Dict = PLBartTokenizer(snake_case ,language_codes="""base""" ,keep_accents=snake_case ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : str = PLBartTokenizer(snake_case ,language_codes="""base""" ,keep_accents=snake_case ) lowercase : List[str] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(snake_case ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) lowercase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( snake_case ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) lowercase : List[Any] = 
tokenizer.convert_tokens_to_ids(snake_case ) self.assertListEqual( snake_case ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) lowercase : Any = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual( snake_case ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) lowercase : List[Any] = tokenizer.vocab_size lowercase : Union[str, Any] = [tokenizer.convert_ids_to_tokens(snake_case ) for x in range(end - 4 ,snake_case )] self.assertListEqual(snake_case ,["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) lowercase : Optional[int] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" lowercase : Tuple = tokenizer(snake_case ).input_ids self.assertEqual( tokenizer.decode(snake_case ,skip_special_tokens=snake_case ,clean_up_tokenization_spaces=snake_case ) ,snake_case ,) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Dict = PLBartTokenizer(snake_case ,language_codes="""multi""" ,keep_accents=snake_case ) lowercase : str = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(snake_case ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(snake_case ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,) lowercase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( snake_case ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", 
"""2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] ,) lowercase : Tuple = tokenizer.convert_tokens_to_ids(snake_case ) self.assertListEqual( snake_case ,[ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] ,) lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(snake_case ) self.assertListEqual( snake_case ,[ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] ,) lowercase : List[Any] = tokenizer.vocab_size lowercase : int = [tokenizer.convert_ids_to_tokens(snake_case ) for x in range(end - 7 ,snake_case )] self.assertListEqual( snake_case ,["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) lowercase : int = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go""" lowercase : Optional[Any] = tokenizer(snake_case ).input_ids self.assertEqual( tokenizer.decode(snake_case ,skip_special_tokens=snake_case ,clean_up_tokenization_spaces=snake_case ) ,snake_case ,) @require_torch @require_sentencepiece @require_tokenizers class __snake_case ( unittest.TestCase ): _a : str= "uclanlp/plbart-python-en_XX" _a : Union[str, Any]= [ "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])", "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])", ] _a : List[Any]= [ "Returns the maximum value of a b c.", "Sums the values of a b c.", ] _a : Optional[Any]= [ 134, 5452, 3_3460, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 988, 20, 3_3456, 19, 3_3456, 
771, 39, 4258, 889, 3318, 3_3441, 3_3463, 3_3465, 3_3463, 3_3449, 2471, 2, PYTHON_CODE, ] @classmethod def _SCREAMING_SNAKE_CASE ( cls ): '''simple docstring''' lowercase : PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name ,language_codes="""base""" ,src_lang="""python""" ,tgt_lang="""en_XX""" ) lowercase : Union[str, Any] = 1 return cls def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] ,50001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] ,50002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] ,50003 ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertIn(snake_case ,self.tokenizer.all_special_ids ) lowercase : List[str] = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2] lowercase : int = self.tokenizer.decode(snake_case ,skip_special_tokens=snake_case ) lowercase : Tuple = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=snake_case ) self.assertEqual(snake_case ,snake_case ) self.assertNotIn(self.tokenizer.eos_token ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : List[str] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20] self.assertIsInstance(src_text[0] ,snake_case ) lowercase : Tuple = 10 lowercase : List[str] = self.tokenizer(snake_case ,max_length=snake_case ,truncation=snake_case ).input_ids[0] self.assertEqual(ids[-2] ,2 ) self.assertEqual(ids[-1] ,snake_case ) self.assertEqual(len(snake_case ) ,snake_case ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) ,[50004, 50001] ) def _SCREAMING_SNAKE_CASE ( self ): 
'''simple docstring''' lowercase : List[str] = tempfile.mkdtemp() lowercase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(snake_case ) lowercase : Optional[Any] = PLBartTokenizer.from_pretrained(snake_case ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,snake_case ) @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : int = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=snake_case ,return_tensors="""pt""" ) lowercase : List[str] = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() ,[2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] ,snake_case ) self.assertEqual(batch.decoder_input_ids[1][-1] ,2 ) self.assertEqual(batch.labels[1][-2:].tolist() ,[2, EN_CODE] ) @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = self.tokenizer( self.src_text ,text_target=self.tgt_text ,padding=snake_case ,truncation=snake_case ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,) lowercase : str = shift_tokens_right(batch["""labels"""] ,self.tokenizer.pad_token_id ) self.assertIsInstance(snake_case ,snake_case ) self.assertEqual((2, 26) ,batch.input_ids.shape ) self.assertEqual((2, 26) ,batch.attention_mask.shape ) lowercase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens ,snake_case ) self.assertEqual(2 ,batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens ,[] ) self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id, PYTHON_CODE] ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = self.tokenizer(self.src_text ,padding=snake_case ,truncation=snake_case ,max_length=3 ,return_tensors="""pt""" 
) lowercase : str = self.tokenizer( text_target=self.tgt_text ,padding=snake_case ,truncation=snake_case ,max_length=10 ,return_tensors="""pt""" ) lowercase : int = targets["""input_ids"""] lowercase : Tuple = shift_tokens_right(snake_case ,self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] ,3 ) self.assertEqual(batch.decoder_input_ids.shape[1] ,10 ) @require_torch def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = self.tokenizer._build_translation_inputs( """A test""" ,return_tensors="""pt""" ,src_lang="""en_XX""" ,tgt_lang="""java""" ) self.assertEqual( nested_simplify(snake_case ) ,{ # A, test, EOS, en_XX """input_ids""": [[150, 242, 2, 50003]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 50001, } ,)
336
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Any ,*__lowerCAmelCase: Optional[Any] ,**__lowerCAmelCase: List[str] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Dict ,*__lowerCAmelCase: Optional[int] ,**__lowerCAmelCase: Dict ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: str ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Any ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: Any ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Optional[int] ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Union[str, Any] ,*__lowerCAmelCase: int ,**__lowerCAmelCase: List[Any] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Any ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Dict ,*__lowerCAmelCase: Optional[Any] ,**__lowerCAmelCase: int ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Optional[Any] ,*__lowerCAmelCase: Optional[Any] ,**__lowerCAmelCase: str 
): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: int ,*__lowerCAmelCase: str ,**__lowerCAmelCase: str ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: str ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: Any ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: List[str] ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: List[str] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Optional[int] ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: int ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: Dict ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: int ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: Any ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: List[str] ,*__lowerCAmelCase: str ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: List[str] ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: Any ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: str ,*__lowerCAmelCase: Union[str, Any] ,**__lowerCAmelCase: Any ): '''simple docstring''' 
requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Dict ,*__lowerCAmelCase: Optional[int] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Optional[int] ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: Any ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Optional[Any] ,*__lowerCAmelCase: Tuple ,**__lowerCAmelCase: List[Any] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: List[Any] ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: Optional[Any] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Union[str, Any] ,*__lowerCAmelCase: Optional[int] ,**__lowerCAmelCase: Optional[int] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Optional[Any] ,*__lowerCAmelCase: Any ,**__lowerCAmelCase: List[Any] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Optional[Any] ,*__lowerCAmelCase: Optional[Any] ,**__lowerCAmelCase: List[str] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Union[str, Any] ,*__lowerCAmelCase: Dict ,**__lowerCAmelCase: int ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: str ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: 
List[Any] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: List[str] ,*__lowerCAmelCase: Optional[int] ,**__lowerCAmelCase: Tuple ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: int ,*__lowerCAmelCase: int ,**__lowerCAmelCase: int ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Dict ,*__lowerCAmelCase: List[str] ,**__lowerCAmelCase: List[str] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] ) class A_ ( metaclass=_a ): lowerCAmelCase__ = ['sentencepiece'] def __init__( self: Union[str, Any] ,*__lowerCAmelCase: Any ,**__lowerCAmelCase: List[str] ): '''simple docstring''' requires_backends(self ,["sentencepiece"] )
386
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class A_ : def __init__( self: Any ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple=13 ,__lowerCAmelCase: Optional[int]=7 ,__lowerCAmelCase: Any=True ,__lowerCAmelCase: Optional[int]=True ,__lowerCAmelCase: List[str]=True ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Union[str, Any]=99 ,__lowerCAmelCase: Optional[int]=24 ,__lowerCAmelCase: str=2 ,__lowerCAmelCase: Optional[Any]=6 ,__lowerCAmelCase: int=37 ,__lowerCAmelCase: Dict="gelu" ,__lowerCAmelCase: List[str]=0.1 ,__lowerCAmelCase: str=0.1 ,__lowerCAmelCase: int=512 ,__lowerCAmelCase: Optional[Any]=16 ,__lowerCAmelCase: List[str]=2 ,__lowerCAmelCase: str=0.02 ,__lowerCAmelCase: Any=3 ,__lowerCAmelCase: int=None ,__lowerCAmelCase: List[str]=1_000 ,): '''simple docstring''' _lowerCamelCase : Union[str, Any] = parent _lowerCamelCase : List[str] = batch_size _lowerCamelCase : List[Any] = seq_length _lowerCamelCase : Tuple = is_training _lowerCamelCase : Union[str, Any] = use_input_mask _lowerCamelCase : int = use_token_type_ids _lowerCamelCase : Dict = use_labels _lowerCamelCase : Optional[Any] = vocab_size _lowerCamelCase : Union[str, Any] = hidden_size _lowerCamelCase : List[str] = num_hidden_layers _lowerCamelCase : Dict = num_attention_heads _lowerCamelCase : Dict = intermediate_size _lowerCamelCase : List[str] = hidden_act _lowerCamelCase : str = 
hidden_dropout_prob _lowerCamelCase : Optional[Any] = attention_probs_dropout_prob _lowerCamelCase : Tuple = max_position_embeddings _lowerCamelCase : Optional[int] = type_vocab_size _lowerCamelCase : int = type_sequence_label_size _lowerCamelCase : Optional[Any] = initializer_range _lowerCamelCase : str = num_labels _lowerCamelCase : Any = scope _lowerCamelCase : Dict = range_bbox def _lowercase ( self: List[Any] ): '''simple docstring''' _lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) _lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] ,self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: _lowerCamelCase : Dict = bbox[i, j, 3] _lowerCamelCase : Dict = bbox[i, j, 1] _lowerCamelCase : Dict = t if bbox[i, j, 2] < bbox[i, j, 0]: _lowerCamelCase : Dict = bbox[i, j, 2] _lowerCamelCase : List[Any] = bbox[i, j, 0] _lowerCamelCase : Optional[Any] = t _lowerCamelCase : Dict = None if self.use_input_mask: _lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) _lowerCamelCase : str = None if self.use_token_type_ids: _lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) _lowerCamelCase : Union[str, Any] = None _lowerCamelCase : Union[str, Any] = None if self.use_labels: _lowerCamelCase : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) _lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) _lowerCamelCase : str = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def _lowercase ( self: Optional[Any] ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads 
,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ,): '''simple docstring''' _lowerCamelCase : Dict = LiltModel(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : Tuple = model(__lowerCAmelCase ,bbox=__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ) _lowerCamelCase : Optional[Any] = model(__lowerCAmelCase ,bbox=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ) _lowerCamelCase : Dict = model(__lowerCAmelCase ,bbox=__lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Dict ,__lowerCAmelCase: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: str ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Dict ,): '''simple docstring''' _lowerCamelCase : int = self.num_labels _lowerCamelCase : str = LiltForTokenClassification(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : int = model( __lowerCAmelCase ,bbox=__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,labels=__lowerCAmelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self: List[str] ,__lowerCAmelCase: str ,__lowerCAmelCase: Dict ,__lowerCAmelCase: Tuple 
,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Dict ,): '''simple docstring''' _lowerCamelCase : Optional[int] = LiltForQuestionAnswering(config=__lowerCAmelCase ) model.to(__lowerCAmelCase ) model.eval() _lowerCamelCase : List[Any] = model( __lowerCAmelCase ,bbox=__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,start_positions=__lowerCAmelCase ,end_positions=__lowerCAmelCase ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : List[Any] = self.prepare_config_and_inputs() ( ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ( _lowerCamelCase ), ) : int = config_and_inputs _lowerCamelCase : Optional[Any] = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A_ ( _a , _a , _a , unittest.TestCase ): lowerCAmelCase__ = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase__ = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase__ = False lowerCAmelCase__ = False def _lowercase ( self: Tuple ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Tuple ): '''simple docstring''' return True def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : Union[str, Any] = 
LiltModelTester(self ) _lowerCamelCase : Dict = ConfigTester(self ,config_class=__lowerCAmelCase ,hidden_size=37 ) def _lowercase ( self: Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def _lowercase ( self: Tuple ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowercase ( self: Optional[int] ): '''simple docstring''' _lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _lowerCamelCase : Union[str, Any] = type self.model_tester.create_and_check_model(*__lowerCAmelCase ) def _lowercase ( self: Union[str, Any] ): '''simple docstring''' _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase ) def _lowercase ( self: List[str] ): '''simple docstring''' _lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase ) @slow def _lowercase ( self: Optional[Any] ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCamelCase : Optional[int] = LiltModel.from_pretrained(__lowerCAmelCase ) self.assertIsNotNone(__lowerCAmelCase ) @require_torch @slow class A_ ( unittest.TestCase ): def _lowercase ( self: Dict ): '''simple docstring''' _lowerCamelCase : Tuple = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(__lowerCAmelCase ) _lowerCamelCase : Dict = torch.tensor([[1, 2]] ,device=__lowerCAmelCase ) _lowerCamelCase : int = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] ,device=__lowerCAmelCase ) # forward pass with torch.no_grad(): _lowerCamelCase : int = model(input_ids=__lowerCAmelCase ,bbox=__lowerCAmelCase ) _lowerCamelCase : int = torch.Size([1, 2, 768] ) _lowerCamelCase : str = torch.tensor( [[-0.06_53, 0.09_50, 
-0.00_61], [-0.05_45, 0.09_26, -0.03_24]] ,device=__lowerCAmelCase ,) self.assertTrue(outputs.last_hidden_state.shape ,__lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] ,__lowerCAmelCase ,atol=1e-3 ) )
386
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) def _A ( lowerCamelCase , lowerCamelCase=False ): a__ : Union[str, Any] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ("cls_token", "vit.embeddings.cls_token"), ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("pos_embed", "vit.embeddings.position_embeddings"), ] ) if 
base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" a__ : int = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: a__ : Any = "" else: a__ : List[Any] = "vit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) a__ : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" ) a__ : int = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict a__ : Optional[int] = in_proj_weight[ : config.hidden_size, : ] a__ : Any = in_proj_bias[: config.hidden_size] a__ : Any = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] a__ : Union[str, Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] a__ : str = in_proj_weight[ -config.hidden_size :, : ] a__ : Optional[int] = in_proj_bias[-config.hidden_size :] def _A ( lowerCamelCase ): a__ : Union[str, Any] = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(_lowerCAmelCase , _lowerCAmelCase ) def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase ): a__ : str = dct.pop(_lowerCAmelCase ) a__ : str = val def _A ( ): a__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg" a__ : int = Image.open(requests.get(_lowerCAmelCase , stream=_lowerCAmelCase ).raw ) return im @torch.no_grad() def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase=True ): a__ : Dict = ViTConfig() # patch_size if model_name[-1] == "8": a__ 
: Dict = 8 # set labels if required if not base_model: a__ : Tuple = 1000 a__ : List[str] = "huggingface/label-files" a__ : Optional[int] = "imagenet-1k-id2label.json" a__ : Tuple = json.load(open(hf_hub_download(_lowerCAmelCase , _lowerCAmelCase , repo_type="dataset" ) , "r" ) ) a__ : Tuple = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} a__ : Dict = idalabel a__ : List[str] = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: a__ : Any = 384 a__ : Union[str, Any] = 1536 a__ : Union[str, Any] = 12 a__ : Optional[int] = 6 # load original model from torch hub a__ : Optional[int] = torch.hub.load("facebookresearch/dino:main" , _lowerCAmelCase ) original_model.eval() # load state_dict of original model, remove and rename some keys a__ : Dict = original_model.state_dict() if base_model: remove_classification_head_(_lowerCAmelCase ) a__ : Dict = create_rename_keys(_lowerCAmelCase , base_model=_lowerCAmelCase ) for src, dest in rename_keys: rename_key(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) read_in_q_k_v(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # load HuggingFace model if base_model: a__ : Optional[int] = ViTModel(_lowerCAmelCase , add_pooling_layer=_lowerCAmelCase ).eval() else: a__ : Optional[int] = ViTForImageClassification(_lowerCAmelCase ).eval() model.load_state_dict(_lowerCAmelCase ) # Check outputs on an image, prepared by ViTImageProcessor a__ : Optional[int] = ViTImageProcessor() a__ : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ) a__ : Optional[Any] = encoding["pixel_values"] a__ : Any = model(_lowerCAmelCase ) if base_model: a__ : Optional[int] = original_model(_lowerCAmelCase ) assert torch.allclose(_lowerCAmelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: a__ : List[str] = original_model(_lowerCAmelCase ) assert logits.shape == outputs.logits.shape assert torch.allclose(_lowerCAmelCase , outputs.logits , atol=1E-3 ) 
Path(_lowerCAmelCase ).mkdir(exist_ok=_lowerCAmelCase ) print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowerCAmelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowerCAmelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""dino_vitb16""", type=str, help="""Name of the model trained with DINO you\'d like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--base_model""", action="""store_true""", help="""Whether to only convert the base model (no projection head weights).""", ) parser.set_defaults(base_model=True) SCREAMING_SNAKE_CASE__ : str = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
112
'''simple docstring''' import functools def A_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ): """simple docstring""" if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for day in days ): raise ValueError("The parameter days should be a list of integers" ) if len(_lowerCAmelCase ) != 3 or not all(isinstance(_lowerCAmelCase , _lowerCAmelCase ) for cost in costs ): raise ValueError("The parameter costs should be a list of three integers" ) if len(_lowerCAmelCase ) == 0: return 0 if min(_lowerCAmelCase ) <= 0: raise ValueError("All days elements should be greater than 0" ) if max(_lowerCAmelCase ) >= 366: raise ValueError("All days elements should be less than 366" ) _lowerCamelCase : Union[str, Any] = set(_lowerCAmelCase ) @functools.cache def dynamic_programming(_lowerCAmelCase : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
44
0
import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device __UpperCAmelCase = False class _a ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class _a ( unittest.TestCase ): """simple docstring""" def __a ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self ): SCREAMING_SNAKE_CASE : Tuple = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) SCREAMING_SNAKE_CASE : List[str] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = pipe.dual_guided( prompt='first prompt' ,image=__lowerCamelCase ,text_to_image_strength=0.75 ,generator=__lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = VersatileDiffusionPipeline.from_pretrained(__lowerCamelCase ,torch_dtype=torch.floataa ) pipe.to(__lowerCamelCase ) pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = generator.manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = pipe.dual_guided( prompt='first prompt' ,image=__lowerCamelCase ,text_to_image_strength=0.75 ,generator=__lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='numpy' ,).images assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass" def __a ( self ): SCREAMING_SNAKE_CASE : Tuple = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' ,torch_dtype=torch.floataa ) pipe.to(__lowerCamelCase ) 
pipe.set_progress_bar_config(disable=__lowerCamelCase ) SCREAMING_SNAKE_CASE : Tuple = '''cyberpunk 2077''' SCREAMING_SNAKE_CASE : List[str] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' ) SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : List[Any] = pipe.dual_guided( prompt=__lowerCamelCase ,image=__lowerCamelCase ,text_to_image_strength=0.75 ,generator=__lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ,).images SCREAMING_SNAKE_CASE : Any = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 SCREAMING_SNAKE_CASE : Tuple = '''A painting of a squirrel eating a burger ''' SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Tuple = pipe.text_to_image( prompt=__lowerCamelCase ,generator=__lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='numpy' ).images SCREAMING_SNAKE_CASE : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : Optional[Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 SCREAMING_SNAKE_CASE : List[Any] = pipe.image_variation(__lowerCamelCase ,generator=__lowerCamelCase ,output_type='numpy' ).images SCREAMING_SNAKE_CASE : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE : str = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
702
'''simple docstring''' from pathlib import Path import numpy as np from PIL import Image def SCREAMING_SNAKE_CASE_ ( snake_case_ : np.ndarray ) -> np.ndarray: SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def SCREAMING_SNAKE_CASE_ ( snake_case_ : np.ndarray ) -> np.ndarray: return (gray > 127) & (gray <= 255) def SCREAMING_SNAKE_CASE_ ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ) -> np.ndarray: SCREAMING_SNAKE_CASE : List[Any] = np.zeros_like(snake_case_ ) SCREAMING_SNAKE_CASE : Tuple = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image SCREAMING_SNAKE_CASE : Optional[int] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): SCREAMING_SNAKE_CASE : Optional[int] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() SCREAMING_SNAKE_CASE : int = int(summation > 0 ) return output if __name__ == "__main__": # read original image __UpperCAmelCase = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg' __UpperCAmelCase = np.array(Image.open(lena_path)) # kernel to be applied __UpperCAmelCase = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __UpperCAmelCase = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __UpperCAmelCase = Image.fromarray(output).convert('RGB') pil_img.save('result_dilation.png')
220
0
"""simple docstring""" from typing import Optional, Tuple, Union import tensorflow as tf from ...activations_tf import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_tf_outputs import ( TFBaseModelOutputWithNoAttention, TFBaseModelOutputWithPoolingAndNoAttention, TFSequenceClassifierOutput, ) from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs from ...tf_utils import shape_list from ...utils import logging from .configuration_regnet import RegNetConfig a_ = logging.get_logger(__name__) # General docstring a_ = """RegNetConfig""" # Base docstring a_ = """facebook/regnet-y-040""" a_ = [1, 1_0_8_8, 7, 7] # Image classification docstring a_ = """facebook/regnet-y-040""" a_ = """tabby, tabby cat""" a_ = [ """facebook/regnet-y-040""", # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ = 3 , UpperCamelCase_ = 1 , UpperCamelCase_ = 1 , UpperCamelCase_ = "relu" , **UpperCamelCase_ , ) -> List[str]: super().__init__(**A_ ) # The padding and conv has been verified in # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb __lowercase : Union[str, Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 ) __lowercase : Dict = tf.keras.layers.ConvaD( filters=A_ , kernel_size=A_ , strides=A_ , padding='''VALID''' , groups=A_ , use_bias=A_ , name='''convolution''' , ) __lowercase : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' ) __lowercase : int = ACTaFN[activation] if activation is not None else tf.identity def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[Any]: __lowercase : Tuple = self.convolution(self.padding(A_ ) ) __lowercase : Any = self.normalization(A_ ) __lowercase : Tuple = 
self.activation(A_ ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self , UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: super().__init__(**A_ ) __lowercase : Dict = config.num_channels __lowercase : Optional[int] = TFRegNetConvLayer( out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: __lowercase : str = shape_list(A_ )[1] if tf.executing_eagerly() and num_channels != self.num_channels: raise ValueError( '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' ) # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format. # So change the input format from `NCHW` to `NHWC`. # shape = (batch_size, in_height, in_width, in_channels=num_channels) __lowercase : Optional[Any] = tf.transpose(A_ , perm=(0, 2, 3, 1) ) __lowercase : Tuple = self.embedder(A_ ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ = 2 , **UpperCamelCase_ ) -> int: super().__init__(**A_ ) __lowercase : Optional[Any] = tf.keras.layers.ConvaD( filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='''convolution''' ) __lowercase : Dict = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='''normalization''' ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = False ) -> Tuple: return self.normalization(self.convolution(A_ ) , training=A_ ) class UpperCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]: super().__init__(**A_ ) __lowercase : Tuple = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='''pooler''' ) __lowercase : Any = [ tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='''relu''' , name='''attention.0''' ), tf.keras.layers.ConvaD(filters=A_ , 
kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ), ] def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Optional[int] = self.pooler(A_ ) for layer_module in self.attention: __lowercase : List[str] = layer_module(A_ ) __lowercase : Tuple = hidden_state * pooled return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 1 , **UpperCamelCase_ ) -> Dict: super().__init__(**A_ ) __lowercase : Tuple = in_channels != out_channels or stride != 1 __lowercase : str = max(1 , out_channels // config.groups_width ) __lowercase : Union[str, Any] = ( TFRegNetShortCut(A_ , stride=A_ , name='''shortcut''' ) if should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) # `self.layers` instead of `self.layer` because that is a reserved argument. __lowercase : Dict = [ TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='''layer.2''' ), ] __lowercase : Tuple = ACTaFN[config.hidden_act] def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : str = hidden_state for layer_module in self.layers: __lowercase : Optional[int] = layer_module(A_ ) __lowercase : Optional[Any] = self.shortcut(A_ ) hidden_state += residual __lowercase : int = self.activation(A_ ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 1 , **UpperCamelCase_ ) -> List[Any]: super().__init__(**A_ ) __lowercase : Tuple = in_channels != out_channels or stride != 1 __lowercase : Optional[int] = max(1 , out_channels // config.groups_width ) __lowercase : Union[str, Any] = ( TFRegNetShortCut(A_ , stride=A_ , name='''shortcut''' ) if 
should_apply_shortcut else tf.keras.layers.Activation('''linear''' , name='''shortcut''' ) ) __lowercase : int = [ TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ), TFRegNetConvLayer( A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='''layer.1''' ), TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ), TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='''layer.3''' ), ] __lowercase : Optional[Any] = ACTaFN[config.hidden_act] def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Dict = hidden_state for layer_module in self.layers: __lowercase : Optional[Any] = layer_module(A_ ) __lowercase : str = self.shortcut(A_ ) hidden_state += residual __lowercase : Any = self.activation(A_ ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 2 , UpperCamelCase_ = 2 , **UpperCamelCase_ ) -> str: super().__init__(**A_ ) __lowercase : List[Any] = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer __lowercase : str = [ # downsampling is done in the first layer with stride of 2 layer(A_ , A_ , A_ , stride=A_ , name='''layers.0''' ), *[layer(A_ , A_ , A_ , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )], ] def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: for layer_module in self.layers: __lowercase : Optional[Any] = layer_module(A_ ) return hidden_state class UpperCAmelCase_ ( tf.keras.layers.Layer ): def __init__( self , UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: super().__init__(**A_ ) __lowercase : List[Any] = [] # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( TFRegNetStage( A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' 
, ) ) __lowercase : Dict = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ): self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F"""stages.{i+1}""" ) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = True ) -> Union[str, Any]: __lowercase : str = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: __lowercase : Any = hidden_states + (hidden_state,) __lowercase : Any = stage_module(A_ ) if output_hidden_states: __lowercase : Optional[int] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ ) @keras_serializable class UpperCAmelCase_ ( tf.keras.layers.Layer ): UpperCamelCase =RegNetConfig def __init__( self , UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[Any]: super().__init__(**A_ ) __lowercase : Optional[int] = config __lowercase : int = TFRegNetEmbeddings(A_ , name='''embedder''' ) __lowercase : str = TFRegNetEncoder(A_ , name='''encoder''' ) __lowercase : List[str] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='''pooler''' ) @unpack_inputs def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , ) -> Union[str, Any]: __lowercase : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict __lowercase : Tuple = self.embedder(A_ , training=A_ ) __lowercase : Optional[Any] = self.encoder( A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ ) __lowercase : str = encoder_outputs[0] __lowercase : Union[str, Any] = self.pooler(A_ ) # Change to NCHW output format have uniformity in the modules 
__lowercase : str = tf.transpose(A_ , perm=(0, 3, 1, 2) ) __lowercase : Union[str, Any] = tf.transpose(A_ , perm=(0, 3, 1, 2) ) # Change the other hidden state outputs to NCHW as well if output_hidden_states: __lowercase : Optional[int] = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , ) class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): UpperCamelCase =RegNetConfig UpperCamelCase ="""regnet""" UpperCamelCase ="""pixel_values""" @property def _lowerCamelCase ( self ) -> Optional[Any]: return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) , dtype=tf.floataa )} a_ = r""" Parameters: This model is a Tensorflow [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and behavior. config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights. """ a_ = r""" Args: pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConveNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" @add_start_docstrings( "The bare RegNet model outputting raw features without any specific head on top." , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): def __init__( self , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]: super().__init__(A_ , *A_ , **A_ ) __lowercase : Optional[int] = TFRegNetMainLayer(A_ , name='''regnet''' ) @unpack_inputs @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=False , ) -> Dict: __lowercase : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase : List[Any] = return_dict if return_dict is not None else self.config.use_return_dict __lowercase : List[str] = self.regnet( pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , ) if not return_dict: return (outputs[0],) + outputs[1:] return TFBaseModelOutputWithPoolingAndNoAttention( last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , ) @add_start_docstrings( "\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): def __init__( self , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) -> Tuple: super().__init__(A_ , *A_ , **A_ ) __lowercase : Tuple = config.num_labels __lowercase : Dict = TFRegNetMainLayer(A_ , name='''regnet''' ) # classification head __lowercase : str = [ tf.keras.layers.Flatten(), tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity, ] @unpack_inputs @add_start_docstrings_to_model_forward(A_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _lowerCamelCase ( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_=False , ) -> Union[str, Any]: __lowercase : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict __lowercase : Dict = self.regnet( A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ ) __lowercase : str = outputs.pooler_output if return_dict else outputs[1] __lowercase : Tuple = self.classifier[0](A_ ) __lowercase : List[Any] = self.classifier[1](A_ ) __lowercase : Any = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ ) if not return_dict: __lowercase : Any = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
76
import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _A : Optional[int] = object() # For specifying empty leaf dict `{}` _A : Tuple = object() def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict: SCREAMING_SNAKE_CASE__ = tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(lowerCAmelCase_ ) - len(lowerCAmelCase_ ) + 1 ): SCREAMING_SNAKE_CASE__ = [x.match(lowerCAmelCase_ ) for x, y in zip(lowerCAmelCase_ , ks[i:] )] if matches and all(lowerCAmelCase_ ): return True return False def __snake_case ( lowerCAmelCase_ ) -> int: def replace(lowerCAmelCase_ , lowerCAmelCase_ ): for rule, replacement in rules: if _match(lowerCAmelCase_ , lowerCAmelCase_ ): return replacement return val return replace def __snake_case ( ) -> Optional[int]: return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , lowerCAmelCase_ )), (("transformer", "wte", "embedding"), P('''mp''' , lowerCAmelCase_ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCAmelCase_ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , lowerCAmelCase_ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(lowerCAmelCase_ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , lowerCAmelCase_ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def __snake_case ( lowerCAmelCase_ ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = _get_partition_rules() SCREAMING_SNAKE_CASE__ = _replacement_rules(lowerCAmelCase_ ) SCREAMING_SNAKE_CASE__ = {k: _unmatched for k in flatten_dict(lowerCAmelCase_ )} SCREAMING_SNAKE_CASE__ = {k: replace(lowerCAmelCase_ , lowerCAmelCase_ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete 
partition spec." return freeze(unflatten_dict(lowerCAmelCase_ ) )
100
0
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class _lowercase(TaskTemplate):
    """Task template describing the column layout of a text-classification dataset."""

    # `task` is the only true dataclass field; the schema entries below are
    # class-level constants (ClassVar) and are not part of the dataclass state.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the dataset's ClassLabel.

        Raises ValueError if `label_column` is missing from `features` or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so install the updated schema via __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the dataset's column names onto the task's canonical column names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
707
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, is_valid_image, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL A__ : Dict = logging.get_logger(__name__) def a__ ( lowerCAmelCase : Optional[Any] ): '''simple docstring''' if isinstance(lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ): return videos elif isinstance(lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ): return [videos] elif is_valid_image(lowerCAmelCase ): return [[videos]] raise ValueError(F"Could not make batched video from {videos}" ) class _lowercase ( lowerCAmelCase_ ): '''simple docstring''' _A = ['pixel_values'] def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> None: super().__init__(**__UpperCamelCase ) UpperCAmelCase__ : Optional[Any] = size if size is not None else {"shortest_edge": 2_56} UpperCAmelCase__ : List[Any] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) UpperCAmelCase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} UpperCAmelCase__ : int = get_size_dict(__UpperCamelCase , param_name="crop_size" ) UpperCAmelCase__ : 
Dict = do_resize UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : List[Any] = do_center_crop UpperCAmelCase__ : str = crop_size UpperCAmelCase__ : Optional[int] = resample UpperCAmelCase__ : int = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_factor UpperCAmelCase__ : Union[str, Any] = offset UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN UpperCAmelCase__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BILINEAR , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray: UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) if "shortest_edge" in size: UpperCAmelCase__ : Union[str, Any] = get_resize_output_image_size(__UpperCamelCase , size["shortest_edge"] , default_to_square=__UpperCamelCase ) elif "height" in size and "width" in size: UpperCAmelCase__ : Any = (size["height"], size["width"]) else: raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" ) return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray: UpperCAmelCase__ : Optional[Any] = get_size_dict(__UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(F"Size must have 'height' and 'width' as keys. 
Got {size.keys()}" ) return center_crop(__UpperCamelCase , size=(size["height"], size["width"]) , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True , __UpperCamelCase = None , **__UpperCamelCase , )-> Tuple: UpperCAmelCase__ : str = image.astype(np.floataa ) if offset: UpperCAmelCase__ : Tuple = image - (scale / 2) return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , )-> np.ndarray: return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase ) def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , )-> np.ndarray: if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) if offset and not do_rescale: raise ValueError("For offset, do_rescale must also be set to True." ) # All transformations expect numpy arrays. 
UpperCAmelCase__ : Optional[Any] = to_numpy_array(__UpperCamelCase ) if do_resize: UpperCAmelCase__ : Union[str, Any] = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) if do_center_crop: UpperCAmelCase__ : int = self.center_crop(__UpperCamelCase , size=__UpperCamelCase ) if do_rescale: UpperCAmelCase__ : List[str] = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase , offset=__UpperCamelCase ) if do_normalize: UpperCAmelCase__ : List[Any] = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) UpperCAmelCase__ : Dict = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) return image def lowerCAmelCase__ ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , )-> PIL.Image.Image: UpperCAmelCase__ : Optional[int] = do_resize if do_resize is not None else self.do_resize UpperCAmelCase__ : int = resample if resample is not None else self.resample UpperCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase__ : int = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase__ : Optional[int] = offset if offset is not None else self.offset UpperCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean UpperCAmelCase__ : Optional[int] = image_std if image_std is not None else self.image_std UpperCAmelCase__ : List[str] = size if size is not None else self.size 
UpperCAmelCase__ : Optional[int] = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase ) UpperCAmelCase__ : Dict = crop_size if crop_size is not None else self.crop_size UpperCAmelCase__ : Tuple = get_size_dict(__UpperCamelCase , param_name="crop_size" ) if not valid_images(__UpperCamelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) UpperCAmelCase__ : List[str] = make_batched(__UpperCamelCase ) UpperCAmelCase__ : Optional[Any] = [ [ self._preprocess_image( image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , offset=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , ) for img in video ] for video in videos ] UpperCAmelCase__ : Dict = {"pixel_values": videos} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
660
0
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class __A(SchedulerCommonTest):
    """Scheduler tests for IPNDMScheduler (save/load round-trips, shapes, full loop)."""

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, overridable via kwargs."""
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Check that save_config/from_pretrained round-trips produce identical steps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            # Step a second time to exercise the multistep history as well.
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Round-trips are exercised by check_over_configs / check_over_forward.
        pass

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Check round-trips with default config but varying forward kwargs."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run the full sampling loop twice (IPNDM needs warm history) and return the sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
21
import math
import time
from typing import Dict, List, Optional

from torch.utils.data import Dataset

from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class snake_case_(Seq2SeqTrainer):
    """Seq2Seq trainer that post-processes generated predictions into QA metrics."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-tokenized) examples and the function mapping generations to answers.
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        """Run generation-based evaluation, then compute metrics on post-processed predictions."""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            # Always restore the metric function, even if the loop raises.
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self,
        predict_dataset,
        predict_examples,
        ignore_keys=None,
        metric_key_prefix: str = "test",
        **gen_kwargs,
    ):
        """Run generation-based prediction and return a PredictionOutput with QA metrics."""
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
351
0
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# this script dumps information about the environment

import os
import sys

import transformers


# Silence TensorFlow's C++ logging before tensorflow is imported below.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
490
'''simple docstring''' import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def SCREAMING_SNAKE_CASE ( a_ : Tuple ): # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0X4e00 and cp <= 0X9fff) or (cp >= 0X3400 and cp <= 0X4dbf) # or (cp >= 0X20000 and cp <= 0X2a6df) # or (cp >= 0X2a700 and cp <= 0X2b73f) # or (cp >= 0X2b740 and cp <= 0X2b81f) # or (cp >= 0X2b820 and cp <= 0X2ceaf) # or (cp >= 0Xf900 and cp <= 0Xfaff) or (cp >= 0X2f800 and cp <= 0X2fa1f) # ): # return True return False def SCREAMING_SNAKE_CASE ( a_ : str ): # word like '180' or '身高' or '神' for char in word: __a = ord(a_ ) if not _is_chinese_char(a_ ): return 0 return 1 def SCREAMING_SNAKE_CASE ( a_ : List[str] ): __a = set() for token in tokens: __a = len(a_ ) > 1 and is_chinese(a_ ) if chinese_word: word_set.add(a_ ) __a = list(a_ ) return word_list def SCREAMING_SNAKE_CASE ( a_ : List[str] , a_ : set() ): if not chinese_word_set: return bert_tokens __a = max([len(a_ ) for w in chinese_word_set] ) __a = bert_tokens __a , __a = 0, len(a_ ) while start < end: __a = True if is_chinese(bert_word[start] ): __a = min(end - start , a_ ) for i in range(a_ , 1 , -1 ): __a = ''.join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): __a = '##' + bert_word[j] __a = start + i __a = False break if single_word: start += 1 return bert_word def SCREAMING_SNAKE_CASE ( a_ : List[str] , a_ : LTP , a_ : BertTokenizer ): __a = [] for i in range(0 , len(a_ ) , 100 ): __a 
= ltp_tokenizer.seg(lines[i : i + 100] )[0] __a = [get_chinese_word(a_ ) for r in res] ltp_res.extend(a_ ) assert len(a_ ) == len(a_ ) __a = [] for i in range(0 , len(a_ ) , 100 ): __a = bert_tokenizer(lines[i : i + 100] , add_special_tokens=a_ , truncation=a_ , max_length=512 ) bert_res.extend(res['input_ids'] ) assert len(a_ ) == len(a_ ) __a = [] for input_ids, chinese_word in zip(a_ , a_ ): __a = [] for id in input_ids: __a = bert_tokenizer._convert_id_to_token(a_ ) input_tokens.append(a_ ) __a = add_sub_symbol(a_ , a_ ) __a = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(a_ ): if token[:2] == "##": __a = token[2:] # save chinese tokens' pos if len(a_ ) == 1 and _is_chinese_char(ord(a_ ) ): ref_id.append(a_ ) ref_ids.append(a_ ) assert len(a_ ) == len(a_ ) return ref_ids def SCREAMING_SNAKE_CASE ( a_ : str ): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name , 'r' , encoding='utf-8' ) as f: __a = f.readlines() __a = [line.strip() for line in data if len(a_ ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' __a = LTP(args.ltp ) # faster in GPU device __a = BertTokenizer.from_pretrained(args.bert ) __a = prepare_ref(a_ , a_ , a_ ) with open(args.save_path , 'w' , encoding='utf-8' ) as f: __a = [json.dumps(a_ ) + '\n' for ref in ref_ids] f.writelines(a_ ) if __name__ == "__main__": UpperCAmelCase_ = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path" ) parser.add_argument("--bert", type=str, default="./resources/robert", 
help="resources for Bert tokenizer") parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res") UpperCAmelCase_ = parser.parse_args() main(args)
490
1
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> "qiskit.result.counts.Counts":
    """Measure qubit 0 of a fresh circuit 1000 times on the Aer simulator.

    Args:
        qubits: number of qubits in the circuit.
        classical_bits: number of classical bits for measurement results.

    Returns:
        Histogram of measured states from the experiment.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
240
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value); with deriv=True, return the sigmoid derivative
    expressed in terms of an already-activated value: value * (1 - value)."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single-weight, single-neuron network toward `expected` (0-100).

    Returns the network output scaled back to the 0-100 range after
    `number_propagations` update steps.
    """
    # Random weight in [1, 199], kept as float for the update arithmetic.
    weight = float(2 * (random.randint(1, 100)) - 1)

    layer_1 = 0.0
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
240
1
from torch import nn


def a(act_fn):
    """Map an activation-function name to a fresh torch.nn module instance.

    Args:
        act_fn: one of "swish", "silu", "mish", "gelu".

    Raises:
        ValueError: for any unsupported name.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
209
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
):
    """Count ways to reach `needed_sum` as a sum of unique natural numbers
    raised to `power`, starting the search at `current_number`.

    Returns (current_sum, solutions_count); only solutions_count matters to callers.
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solution(needed_sum: int, power: int) -> int:
    """Return how many ways `needed_sum` can be written as a sum of unique powers.

    Raises:
        ValueError: unless 1 <= needed_sum <= 1000 and 2 <= power <= 10.
    """
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
209
1
'''simple docstring'''
# NOTE(review): identifiers in this module were machine-mangled.  Every test
# method is named `UpperCamelCase__` (so only the last definition survives as
# a class attribute), every local is `UpperCAmelCase_`, and most call
# arguments were rewritten to `_snake_case`, which at runtime resolves to the
# test class itself.  Some bodies also reference names that are never bound
# (`config`, `reloaded_config`), and the nested class below annotates with
# `Optional`, which is never imported.  The code is kept byte-for-byte; the
# comments describe what each block appears to have tested before mangling.
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))

from test_module.custom_configuration import CustomConfig  # noqa E402

# Path to a local dummy config fixture (presumably used by the
# "load from local file" tests below before mangling).
_lowerCamelCase = get_tests_dir("""fixtures/dummy-config.json""")


class _snake_case(unittest.TestCase):
    # Test suite for `AutoConfig` — apparently a mangled copy of
    # transformers' test_configuration_auto.py.

    def UpperCamelCase__(self):
        # Presumably the original setUp; the local assignment has no effect.
        UpperCAmelCase_: int = 0

    def UpperCamelCase__(self):
        # The auto module must be importable and expose a module spec.
        self.assertIsNotNone(transformers.models.auto.__spec__)
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto"))

    def UpperCamelCase__(self):
        # Loads a config from the Hub by checkpoint name.
        # NOTE(review): the assertion args were presumably `config, BertConfig`
        # before mangling; as written it checks the test class against itself.
        UpperCAmelCase_: int = AutoConfig.from_pretrained("bert-base-uncased")
        self.assertIsInstance(_snake_case, _snake_case)

    def UpperCamelCase__(self):
        # Presumably loaded from the dummy-config fixture path.
        UpperCAmelCase_: List[str] = AutoConfig.from_pretrained(_snake_case)
        self.assertIsInstance(_snake_case, _snake_case)

    def UpperCamelCase__(self):
        # Duplicate of the previous test after mangling.
        UpperCAmelCase_: Tuple = AutoConfig.from_pretrained(_snake_case)
        self.assertIsInstance(_snake_case, _snake_case)

    def UpperCamelCase__(self):
        # Builds a default config by model type name.
        UpperCAmelCase_: int = AutoConfig.for_model("roberta")
        self.assertIsInstance(_snake_case, _snake_case)

    def UpperCamelCase__(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            UpperCAmelCase_: List[str] = os.path.join(_snake_case, "fake-roberta")
            os.makedirs(_snake_case, exist_ok=_snake_case)
            with open(os.path.join(_snake_case, "config.json"), "w") as f:
                # An empty JSON config is enough for type detection by name.
                f.write(json.dumps({}))
            UpperCAmelCase_: Union[str, Any] = AutoConfig.from_pretrained(_snake_case)
            self.assertEqual(type(_snake_case), _snake_case)

    def UpperCamelCase__(self):
        # Registration of a custom config class with the auto-API.
        try:
            AutoConfig.register("custom", _snake_case)
            # Wrong model type will raise an error
            with self.assertRaises(_snake_case):
                AutoConfig.register("model", _snake_case)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_snake_case):
                AutoConfig.register("bert", _snake_case)
            # Now that the config is registered, it can be used as any other config with the auto-API
            UpperCAmelCase_: Union[str, Any] = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                # NOTE(review): `config` is never bound in this method —
                # the saved instance was presumably named `config` originally.
                config.save_pretrained(_snake_case)
                UpperCAmelCase_: Union[str, Any] = AutoConfig.from_pretrained(_snake_case)
                self.assertIsInstance(_snake_case, _snake_case)
        finally:
            # Always clean the registry so other tests see a pristine mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def UpperCamelCase__(self):
        # Helpful error message for an invalid model identifier.
        with self.assertRaisesRegex(
            _snake_case, "bert-base is not a local folder and is not a valid model identifier"
        ):
            UpperCAmelCase_: int = AutoConfig.from_pretrained("bert-base")

    def UpperCamelCase__(self):
        # Helpful error message for an invalid revision.
        with self.assertRaisesRegex(
            _snake_case, R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            UpperCAmelCase_: Optional[int] = AutoConfig.from_pretrained(_snake_case, revision="aaaaaa")

    def UpperCamelCase__(self):
        # Helpful error message when the repo has no config.json at all.
        with self.assertRaisesRegex(
            _snake_case,
            "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.",
        ):
            UpperCAmelCase_: Any = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo")

    def UpperCamelCase__(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(_snake_case):
            UpperCAmelCase_: Optional[Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_snake_case):
            UpperCAmelCase_: List[Any] = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=_snake_case
            )

        UpperCAmelCase_: Optional[int] = AutoConfig.from_pretrained(
            "hf-internal-testing/test_dynamic_model", trust_remote_code=_snake_case
        )
        # NOTE(review): `config` is never bound in this method.
        self.assertEqual(config.__class__.__name__, "NewModelConfig")

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(_snake_case)
            UpperCAmelCase_: Any = AutoConfig.from_pretrained(_snake_case, trust_remote_code=_snake_case)
        # NOTE(review): `reloaded_config` is never bound in this method.
        self.assertEqual(reloaded_config.__class__.__name__, "NewModelConfig")

    def UpperCamelCase__(self):
        # Local vs. remote resolution when a local class shadows a Hub class.
        # NOTE(review): the base name `__SCREAMING_SNAKE_CASE` is mangled by
        # Python's private-name rules and is never defined — this raises
        # NameError at runtime; `Optional` is also unimported.
        class _snake_case(__SCREAMING_SNAKE_CASE):
            __A: Optional[int] = "new-model"

        try:
            AutoConfig.register("new-model", _snake_case)
            # If remote code is not set, the default is to use local
            UpperCAmelCase_: Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model")
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote code is disabled, we load the local one.
            UpperCAmelCase_: Optional[int] = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=_snake_case
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfigLocal")
            # If remote is enabled, we load from the Hub
            UpperCAmelCase_: Any = AutoConfig.from_pretrained(
                "hf-internal-testing/test_dynamic_model", trust_remote_code=_snake_case
            )
            self.assertEqual(config.__class__.__name__, "NewModelConfig")
        finally:
            # Always clean the registry so other tests see a pristine mapping.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
71
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    # SentencePiece not installed: no slow tokenizer class is available.
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class UpperCamelCase_(PreTrainedTokenizerFast):
    """Fast BARThez tokenizer (SentencePiece BPE, backed by HuggingFace
    *tokenizers*), with RoBERTa-style special-token handling:
    ``<s> A </s>`` and ``<s> A </s></s> B </s>``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    # Slow counterpart used when converting back to a SentencePiece tokenizer.
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # The slow tokenizer can only be re-created if the SentencePiece model
        # file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs from sequences by adding special tokens.

        Single sequence: ``<s> X </s>``; pair: ``<s> A </s></s> B </s>``.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return a mask of token type ids. BARThez does not use token types,
        so the mask is all zeros for both sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model file into ``save_directory``.

        Raises:
            ValueError: if this tokenizer was loaded without a SentencePiece
                model file and therefore cannot produce a slow vocabulary.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        # Only copy when the destination differs from the source file.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
441
0
'''Utilities for building canonical dataset/split file names.'''
# Lint as: python3
import itertools
import os
import re


# Regexes used to convert CamelCase names into snake_case.
_uppercase_uppercase_re = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
_lowercase_uppercase_re = re.compile(r"""([a-z\d])([A-Z])""")

# Regexes used to convert snake_case names back into CamelCase.
_single_underscore_re = re.compile(r"""(?<!_)_(?!_)""")
_multiple_underscores_re = re.compile(r"""(_{2,})""")

# Valid split names: word characters, optionally dot-separated.
_split_re = r"""^\w+(\.\w+)*$"""

# Characters that are invalid in Windows paths (kept for reference).
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"""<>:/\|?*"""


def camelcase_to_snakecase(name):
    """Convert a CamelCase name to snake_case (e.g. ``SquadV2`` -> ``squad_v2``)."""
    name = _uppercase_uppercase_re.sub(r'\1_\2', name)
    name = _lowercase_uppercase_re.sub(r'\1_\2', name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert a snake_case name to CamelCase (e.g. ``squad_v2`` -> ``SquadV2``)."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != '')


def filename_prefix_for_name(name):
    """Return the snake_case file prefix for a bare dataset name.

    Raises:
        ValueError: if ``name`` looks like a path rather than a bare name.
    """
    if os.path.basename(name) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''')
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    """Return the ``<dataset>-<split>`` file prefix.

    Raises:
        ValueError: if ``name`` is a path or ``split`` is not a valid split name.
    """
    if os.path.basename(name) != name:
        raise ValueError(F'''Should be a dataset name, not a path: {name}''')
    if not re.match(_split_re, split):
        # Fixed a doubled quote in the original message.
        raise ValueError(F'''Split name should match \'{_split_re}\' but got \'{split}\'.''')
    return F'''{filename_prefix_for_name(name)}-{split}'''


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern matching every shard file of a dataset split."""
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += F'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir, prefix)
    return F'''{filepath}*'''


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the list of shard file names for a dataset split.

    Args:
        path: directory the files live in.
        dataset_name: bare dataset name (CamelCase or snake_case).
        split: split name (must match ``_split_re``).
        filetype_suffix: optional extension appended to each file name.
        shard_lengths: per-shard lengths; when given, one numbered file name
            per shard is produced, otherwise a single un-numbered file name.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [F'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + F'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += F'''.{filetype_suffix}'''
        return [filename]
58
'''BLIP-2 model configuration: vision tower, Q-Former, and composite config.'''
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''salesforce/blip2-opt-2.7b''': '''https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json''',
}


class BlipaVisionConfig(PretrainedConfig):
    """Configuration for the BLIP-2 vision encoder (ViT-style tower)."""

    model_type = 'blip_2_vision_model'

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00_001,
        attention_dropout=0.0,
        initializer_range=1E-10,
        qkv_bias=True,
        **kwargs,
    ) -> int:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )

        return cls.from_dict(config_dict, **kwargs)


class BlipaQFormerConfig(PretrainedConfig):
    """Configuration for the BLIP-2 Q-Former (BERT-like querying transformer
    with periodic cross-attention to the vision encoder)."""

    model_type = 'blip_2_qformer'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ) -> List[str]:
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # Cross-attention to the image features is inserted every
        # `cross_attention_frequency` layers.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['qformer_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.'''
            )

        return cls.from_dict(config_dict, **kwargs)


class SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Composite BLIP-2 configuration holding the vision config, the Q-Former
    config, and the language-model config (any type in ``CONFIG_MAPPING``,
    defaulting to OPT)."""

    model_type = 'blip-2'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs) -> int:
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.')

        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.')

        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')

        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends to the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config,
        qformer_config,
        text_config,
        **kwargs,
    ) -> Any:
        """Instantiate a composite config from the three sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self) -> Tuple:
        """Serialize this config (and nested sub-configs) to a plain dict."""
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
58
1
def equation(x: float) -> float:
    """The function whose root is sought: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Find a root of ``equation`` in ``[a, b]`` by bisection.

    Args:
        a: lower bound of the bracketing interval.
        b: upper bound of the bracketing interval.

    Returns:
        An approximation of the root, accurate to within 0.01.

    Raises:
        ValueError: if ``equation(a)`` and ``equation(b)`` do not have
            opposite signs (Bolzano's theorem gives no root guarantee).
    """
    # Bolzano theory in order to find if there is a root between a and b
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    print(bisection(-2, 5))
    print(bisection(0, 6))
375
from __future__ import annotations


def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all positions a knight at ``position`` can move to on an n x n board."""
    y, x = position
    # The eight L-shaped knight moves.
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for candidate in positions:
        y_test, x_test = candidate
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(candidate)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Return True when every square of the board has been visited (non-zero)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Depth-first backtracking step: try to extend the tour from ``pos``.

    ``curr`` is the move number already placed at ``pos``; squares are marked
    with their move number and reset to 0 on backtrack.
    """
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            # Undo the move before trying the next candidate.
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board.

    Returns:
        A board whose entries are the move numbers 1..n*n.

    Raises:
        ValueError: if no open tour exists for the given board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]

    # Try every square as the starting position.
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    # Fixed typo in the original message ("Kight" -> "Knight").
    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
    """google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class A_(PretrainedConfig):
    """Configuration for an FNet model (``model_type='fnet'``).

    FNet replaces self-attention with Fourier transforms, hence there is no
    ``num_attention_heads`` parameter.
    """

    model_type = '''fnet'''

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # TPU-specific settings for the optimized Fourier transform path.
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/biogpt""": """https://huggingface.co/microsoft/biogpt/resolve/main/config.json""",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class A_(PretrainedConfig):
    """Configuration for a BioGPT model (``model_type='biogpt'``)."""

    model_type = '''biogpt'''

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # When True, embeddings are scaled by sqrt(hidden_size).
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
337
0
import torch

from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class A(SchedulerCommonTest):
    """Tests for `EulerDiscreteScheduler`: config sweeps plus full denoising
    loops whose output statistics are pinned to reference values."""

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable through kwargs."""
        config = {
            'num_train_timesteps': 1100,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run the full sampling loop with epsilon prediction and check the
        output statistics against reference values."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1E-2
        assert abs(result_mean.item() - 0.0131) < 1E-3

    def test_full_loop_with_v_prediction(self):
        """Same loop but with the v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1E-2
        assert abs(result_mean.item() - 2.2_676E-06) < 1E-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed on the target device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1E-2
        assert abs(result_mean.item() - 0.0131) < 1E-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop using the Karras sigma schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1E-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1E-3
70
def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort a list in place using bidirectional bubble ("cocktail shaker") sort.

    Each outer pass bubbles the smallest remaining element leftward and the
    largest remaining element rightward; the sort stops early when a full
    pass performs no swap.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    >>> cocktail_shaker_sort([-4, 5, 0, 1, -2])
    [-4, -2, 0, 1, 5]
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # Backward pass: push the smallest remaining element to the left.
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # Forward pass: push the largest remaining element to the right.
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        # No swap in either direction means the list is already sorted.
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(F"{cocktail_shaker_sort(unsorted) = }")
282
0
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer


VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/electra-small-generator': (
            'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
        ),
        'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
        'google/electra-large-generator': (
            'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
        ),
        'google/electra-small-discriminator': (
            'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
        ),
        'google/electra-base-discriminator': (
            'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
        ),
        'google/electra-large-discriminator': (
            'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'google/electra-small-generator': (
            'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
        ),
        'google/electra-base-generator': (
            'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
        ),
        'google/electra-large-generator': (
            'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
        ),
        'google/electra-small-discriminator': (
            'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
        ),
        'google/electra-base-discriminator': (
            'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
        ),
        'google/electra-large-discriminator': (
            'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/electra-small-generator': 512,
    'google/electra-base-generator': 512,
    'google/electra-large-generator': 512,
    'google/electra-small-discriminator': 512,
    'google/electra-base-discriminator': 512,
    'google/electra-large-discriminator': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'google/electra-small-generator': {'do_lower_case': True},
    'google/electra-base-generator': {'do_lower_case': True},
    'google/electra-large-generator': {'do_lower_case': True},
    'google/electra-small-discriminator': {'do_lower_case': True},
    'google/electra-base-discriminator': {'do_lower_case': True},
    'google/electra-large-discriminator': {'do_lower_case': True},
}


class lowerCamelCase__(PreTrainedTokenizerFast):
    """Fast ELECTRA tokenizer (WordPiece, backed by HuggingFace *tokenizers*),
    with BERT-style special-token handling: ``[CLS] A [SEP]`` and
    ``[CLS] A [SEP] B [SEP]``."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Slow counterpart used when converting back to a Python tokenizer.
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ) -> str:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-build the backend normalizer when the requested casing /
        # accent-stripping / Chinese-character options differ from the ones
        # serialized in tokenizer.json.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> Optional[Any]:
        """Add special tokens: ``[CLS] A [SEP]`` (+ ``B [SEP]`` for pairs)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary files via the backend model."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
702
import re
import string

import numpy as np

import datasets

# Module-level metric metadata.  The original chunk assigned all three
# strings to the same name (`lowercase`), so `_DESCRIPTION`,
# `_KWARGS_DESCRIPTION` and `_CITATION` — the names the metric class below
# actually references — were never defined.
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'

_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    """Exact-match rate (0.0–100.0) between predictions and references.

    The original chunk named both methods `_lowerCamelCase` (so the second
    definition shadowed the first) and reused `a` for every `_compute`
    parameter, which is a SyntaxError; the `datasets.Metric` contract
    requires `_info` and `_compute`.
    """

    def _info(self):
        # Metric metadata: string inputs, no external references.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        """Return {"exact_match": percentage of exact string matches}."""
        # Strip every ignored regex from both sides before any other option
        # (case folding etc.) is applied.
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 1_0_0}
94
0
# Tests for PokerHand (Project Euler problem 54 helper).
# The original chunk bound every constant to a couple of reused mangled
# names, named every function `_lowercase` (each def shadowed the previous
# one), and referenced the undefined `__UpperCamelCase` from the pytest
# decorators and from `os.path.dirname` (which needs `__file__`).
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sola import PokerHand

# Hands ordered from weakest to strongest; used both as parametrised data
# and as the ground truth for sort-order tests.
SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

# (hand, other, expected result of hand.compare_with(other))
TEST_COMPARE = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

# (hand, expected _is_flush())
TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

# (hand, expected _is_straight())
TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

# (hand, expected _is_five_high_straight(), expected _card_values)
TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

# (hand, expected _is_same_kind())
TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

# (hand, expected _hand_type)
TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    """Draw two hands from SORTED_HANDS and derive the expected comparison."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) maps to 0=Loss, 1=Tie, 2=Win.
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Yield `number_of_hands` random (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    file_path = os.path.join(script_dir, "poker_hands.txt")
    with open(file_path) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
214
from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) lowerCAmelCase : Union[str, Any] = _symbol_database.Default() lowerCAmelCase : int = _descriptor_pool.Default().AddSerializedFile( b'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 
\x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 
\x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03''' ) lowerCAmelCase : List[str] = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals) if _descriptor._USE_C_DESCRIPTORS is False: lowerCAmelCase : Tuple = None lowerCAmelCase : List[Any] = b'''H\003''' # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" lowerCAmelCase : Tuple = 45 lowerCAmelCase : Optional[int] = 1581 lowerCAmelCase : Dict = 1517 lowerCAmelCase : Any = 1570 lowerCAmelCase : Any = 1584 lowerCAmelCase : Optional[Any] = 1793 lowerCAmelCase : Optional[Any] = 1795 lowerCAmelCase : List[str] = 1916 lowerCAmelCase : Any = 1864 lowerCAmelCase : Dict = 1905 lowerCAmelCase : Dict = 1919 lowerCAmelCase : Any = 2429 lowerCAmelCase : List[Any] = 2208 lowerCAmelCase : Tuple 
= 2418 lowerCAmelCase : List[Any] = 2323 lowerCAmelCase : List[str] = 2407 # @@protoc_insertion_point(module_scope)
214
1
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class _SCREAMING_SNAKE_CASE ( __snake_case ): """simple docstring""" _SCREAMING_SNAKE_CASE ='Wav2Vec2FeatureExtractor' _SCREAMING_SNAKE_CASE ='AutoTokenizer' def __init__( self: Optional[Any] , __A: str , __A: int ): '''simple docstring''' super().__init__(__A , __A ) a__ = self.feature_extractor a__ = False @classmethod def lowercase ( cls: Any , __A: Tuple , **__A: Dict ): '''simple docstring''' try: return super().from_pretrained(__A , **__A ) except OSError: warnings.warn( F'Loading a tokenizer inside {cls.__name__} from a config that does not' ''' include a `tokenizer_class` attribute is deprecated and will be ''' '''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`''' ''' attribute to either your `config.json` or `tokenizer_config.json` ''' '''file to suppress this warning: ''' , __A , ) a__ = WavaVecaFeatureExtractor.from_pretrained(__A , **__A ) a__ = WavaVecaCTCTokenizer.from_pretrained(__A , **__A ) return cls(feature_extractor=__A , tokenizer=__A ) def __call__( self: Optional[Any] , *__A: str , **__A: Any ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*__A , **__A ) if "raw_speech" in kwargs: warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. 
Use `audio` instead.''' ) a__ = kwargs.pop('''raw_speech''' ) else: a__ = kwargs.pop('''audio''' , __A ) a__ = kwargs.pop('''sampling_rate''' , __A ) a__ = kwargs.pop('''text''' , __A ) if len(__A ) > 0: a__ = args[0] a__ = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: a__ = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A ) if text is not None: a__ = self.tokenizer(__A , **__A ) if text is None: return inputs elif audio is None: return encodings else: a__ = encodings['''input_ids'''] return inputs def lowercase ( self: str , *__A: str , **__A: Any ): '''simple docstring''' if self._in_target_context_manager: return self.current_processor.pad(*__A , **__A ) a__ = kwargs.pop('''input_features''' , __A ) a__ = kwargs.pop('''labels''' , __A ) if len(__A ) > 0: a__ = args[0] a__ = args[1:] if input_features is not None: a__ = self.feature_extractor.pad(__A , *__A , **__A ) if labels is not None: a__ = self.tokenizer.pad(__A , **__A ) if labels is None: return input_features elif input_features is None: return labels else: a__ = labels['''input_ids'''] return input_features def lowercase ( self: Union[str, Any] , *__A: Optional[int] , **__A: Tuple ): '''simple docstring''' return self.tokenizer.batch_decode(*__A , **__A ) def lowercase ( self: int , *__A: Optional[int] , **__A: Any ): '''simple docstring''' return self.tokenizer.decode(*__A , **__A ) @contextmanager def lowercase ( self: Dict ): '''simple docstring''' warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your audio inputs, or in a separate call.''' ) a__ = True a__ = self.tokenizer yield a__ = self.feature_extractor a__ = False
200
"""simple docstring""" import flax.linen as nn import jax.numpy as jnp from .attention_flax import FlaxTransformeraDModel from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =0.0 _SCREAMING_SNAKE_CASE =1 _SCREAMING_SNAKE_CASE =1 _SCREAMING_SNAKE_CASE =True _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =jnp.floataa def lowercase ( self: Union[str, Any] ): '''simple docstring''' a__ = [] a__ = [] for i in range(self.num_layers ): a__ = self.in_channels if i == 0 else self.out_channels a__ = FlaxResnetBlockaD( in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__A ) a__ = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__A ) a__ = resnets a__ = attentions if self.add_downsample: a__ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: Optional[int] , __A: Union[str, Any] , __A: str , __A: Optional[Any] , __A: Any=True ): '''simple docstring''' a__ = () for resnet, attn in zip(self.resnets , self.attentions ): a__ = resnet(__A , __A , deterministic=__A ) a__ = attn(__A , __A , deterministic=__A ) output_states += (hidden_states,) if self.add_downsample: a__ = self.downsamplers_a(__A ) output_states += (hidden_states,) return hidden_states, output_states class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =0.0 _SCREAMING_SNAKE_CASE =1 _SCREAMING_SNAKE_CASE =True 
_SCREAMING_SNAKE_CASE =jnp.floataa def lowercase ( self: Union[str, Any] ): '''simple docstring''' a__ = [] for i in range(self.num_layers ): a__ = self.in_channels if i == 0 else self.out_channels a__ = FlaxResnetBlockaD( in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__A ) a__ = resnets if self.add_downsample: a__ = FlaxDownsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: Dict , __A: int , __A: Dict , __A: Optional[Any]=True ): '''simple docstring''' a__ = () for resnet in self.resnets: a__ = resnet(__A , __A , deterministic=__A ) output_states += (hidden_states,) if self.add_downsample: a__ = self.downsamplers_a(__A ) output_states += (hidden_states,) return hidden_states, output_states class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =0.0 _SCREAMING_SNAKE_CASE =1 _SCREAMING_SNAKE_CASE =1 _SCREAMING_SNAKE_CASE =True _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =jnp.floataa def lowercase ( self: Optional[int] ): '''simple docstring''' a__ = [] a__ = [] for i in range(self.num_layers ): a__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels a__ = self.prev_output_channel if i == 0 else self.out_channels a__ = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__A ) a__ = FlaxTransformeraDModel( in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__A ) a__ = resnets a__ = attentions 
if self.add_upsample: a__ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: Any , __A: Optional[int] , __A: List[Any] , __A: List[str] , __A: Optional[Any] , __A: Any=True ): '''simple docstring''' for resnet, attn in zip(self.resnets , self.attentions ): # pop res hidden states a__ = res_hidden_states_tuple[-1] a__ = res_hidden_states_tuple[:-1] a__ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) a__ = resnet(__A , __A , deterministic=__A ) a__ = attn(__A , __A , deterministic=__A ) if self.add_upsample: a__ = self.upsamplers_a(__A ) return hidden_states class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =0.0 _SCREAMING_SNAKE_CASE =1 _SCREAMING_SNAKE_CASE =True _SCREAMING_SNAKE_CASE =jnp.floataa def lowercase ( self: str ): '''simple docstring''' a__ = [] for i in range(self.num_layers ): a__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels a__ = self.prev_output_channel if i == 0 else self.out_channels a__ = FlaxResnetBlockaD( in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__A ) a__ = resnets if self.add_upsample: a__ = FlaxUpsampleaD(self.out_channels , dtype=self.dtype ) def __call__( self: Tuple , __A: Optional[Any] , __A: Optional[Any] , __A: Union[str, Any] , __A: Dict=True ): '''simple docstring''' for resnet in self.resnets: # pop res hidden states a__ = res_hidden_states_tuple[-1] a__ = res_hidden_states_tuple[:-1] a__ = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 ) a__ = resnet(__A , __A , deterministic=__A ) if self.add_upsample: a__ = self.upsamplers_a(__A ) return hidden_states class _SCREAMING_SNAKE_CASE ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE =42 _SCREAMING_SNAKE_CASE =0.0 _SCREAMING_SNAKE_CASE =1 _SCREAMING_SNAKE_CASE =1 
_SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =False _SCREAMING_SNAKE_CASE =jnp.floataa def lowercase ( self: Tuple ): '''simple docstring''' a__ = [ FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) ] a__ = [] for _ in range(self.num_layers ): a__ = FlaxTransformeraDModel( in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) attentions.append(__A ) a__ = FlaxResnetBlockaD( in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , ) resnets.append(__A ) a__ = resnets a__ = attentions def __call__( self: Any , __A: Optional[int] , __A: int , __A: Tuple , __A: str=True ): '''simple docstring''' a__ = self.resnets[0](__A , __A ) for attn, resnet in zip(self.attentions , self.resnets[1:] ): a__ = attn(__A , __A , deterministic=__A ) a__ = resnet(__A , __A , deterministic=__A ) return hidden_states
200
1
"""Convert BiT checkpoints from the timm library to the Hugging Face format."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    """Build a `BitConfig` with ImageNet-1k label maps for the given timm model name."""
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def rename_key(name):
    """Map a timm state-dict key to the corresponding HF BiT key."""
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    """Download the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy the timm BiT weights into the HF structure and verify the outputs.

    Verifies both the preprocessing pipeline and the classification logits
    against the original timm model before optionally saving / pushing.
    """
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load and rename the state_dict of the original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        # timm stores the classifier head as a 1x1 conv, hence the squeeze
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create an image processor that mirrors the timm transform pipeline
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    outputs = model(pixel_values)
    logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
53
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def UpperCAmelCase ( a__ , a__ , a__ , a__ ): '''simple docstring''' for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def UpperCAmelCase ( a__ , a__ , a__ , a__ , a__=True ): '''simple docstring''' model.train() lowerCAmelCase :Dict = model(a__ ) lowerCAmelCase :Union[str, Any] = F.mse_loss(a__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(a__ ) def UpperCAmelCase ( a__ , a__=False ): '''simple docstring''' set_seed(42 ) lowerCAmelCase :Any = RegressionModel() lowerCAmelCase :List[str] = deepcopy(a__ ) lowerCAmelCase :str = RegressionDataset(length=80 ) lowerCAmelCase :Optional[Any] = DataLoader(a__ , batch_size=16 ) model.to(accelerator.device ) if sched: lowerCAmelCase :List[Any] = AdamW(params=model.parameters() , lr=1e-3 ) lowerCAmelCase :Dict = AdamW(params=ddp_model.parameters() , lr=1e-3 ) lowerCAmelCase :Tuple = LambdaLR(a__ , lr_lambda=lambda a__ : epoch**0.65 ) lowerCAmelCase 
:Optional[Any] = LambdaLR(a__ , lr_lambda=lambda a__ : epoch**0.65 ) # Make a copy of `model` if sched: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :int = accelerator.prepare(a__ , a__ , a__ , a__ ) else: lowerCAmelCase , lowerCAmelCase :Optional[int] = accelerator.prepare(a__ , a__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def UpperCAmelCase ( a__ ): '''simple docstring''' lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Any = get_training_setup(a__ ) # Use a single batch lowerCAmelCase , lowerCAmelCase :Any = next(iter(a__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCAmelCase , lowerCAmelCase :Optional[Any] = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase , lowerCAmelCase :Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a__ , a__ , a__ , a__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(a__ ): step_model(a__ , a__ , a__ , a__ ) else: # Sync grads step_model(a__ , a__ , a__ , a__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(a__ , a__ , a__ , a__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) lowerCAmelCase :Any = ddp_input[torch.randperm(len(a__ ) )] def UpperCAmelCase ( a__ ): '''simple docstring''' lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Dict = get_training_setup(a__ ) # Use a single batch lowerCAmelCase , lowerCAmelCase :str = next(iter(a__ ) ).values() for 
iteration in range(3 ): # Gather the distributed inputs and targs for the base model lowerCAmelCase , lowerCAmelCase :List[Any] = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase , lowerCAmelCase :int = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(a__ , a__ , a__ , a__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(a__ ): step_model(a__ , a__ , a__ , a__ ) else: # Sync grads step_model(a__ , a__ , a__ , a__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) lowerCAmelCase :str = ddp_input[torch.randperm(len(a__ ) )] def UpperCAmelCase ( a__=False , a__=False ): '''simple docstring''' lowerCAmelCase :str = Accelerator( split_batches=a__ , dispatch_batches=a__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Optional[Any] = get_training_setup(a__ ) for iteration, batch in enumerate(a__ ): lowerCAmelCase , lowerCAmelCase :Union[str, Any] = batch.values() # Gather the distributed inputs and targs for the base model lowerCAmelCase , lowerCAmelCase :Dict = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase , lowerCAmelCase :Any = input.to(accelerator.device ), target.to(accelerator.device ) # 
Perform our initial ground truth step in non "DDP" step_model(a__ , a__ , a__ , a__ , a__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(a__ ): step_model(a__ , a__ , a__ , a__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(a__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) lowerCAmelCase :Optional[int] = ddp_input[torch.randperm(len(a__ ) )] GradientState._reset_state() def UpperCAmelCase ( a__=False , a__=False ): '''simple docstring''' lowerCAmelCase :Optional[int] = Accelerator( split_batches=a__ , dispatch_batches=a__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :Union[str, Any] = get_training_setup(a__ , a__ ) for iteration, batch in enumerate(a__ ): lowerCAmelCase , lowerCAmelCase :Optional[int] = batch.values() # Gather the distributed inputs and targs for the base model lowerCAmelCase , lowerCAmelCase :Union[str, Any] = accelerator.gather((ddp_input, ddp_target) ) lowerCAmelCase , lowerCAmelCase :Optional[Any] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(a__ , a__ , a__ , a__ , a__ ) opt.step() if 
((iteration + 1) % 2 == 0) or ((iteration + 1) == len(a__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(a__ ): step_model(a__ , a__ , a__ , a__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" lowerCAmelCase :int = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(a__ )) if accelerator.num_processes > 1: check_model_parameters(a__ , a__ , a__ , a__ ) # Shuffle ddp_input on each iteration torch.manual_seed(13_37 + iteration ) GradientState._reset_state() def UpperCAmelCase ( ): '''simple docstring''' lowerCAmelCase :int = Accelerator() lowerCAmelCase :int = RegressionDataset(length=80 ) lowerCAmelCase :Optional[Any] = DataLoader(a__ , batch_size=16 ) lowerCAmelCase :Any = RegressionDataset(length=96 ) lowerCAmelCase :Dict = DataLoader(a__ , batch_size=16 ) lowerCAmelCase , lowerCAmelCase :Tuple = accelerator.prepare(a__ , a__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(a__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(a__ ) if iteration < len(a__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(a__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(a__ ) if batch_num < len(a__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def UpperCAmelCase ( ): '''simple docstring''' lowerCAmelCase :List[str] = Accelerator() lowerCAmelCase :Optional[int] = 
accelerator.state if state.local_process_index == 0: print('**Test `accumulate` gradient accumulation with dataloader break**' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('**Test NOOP `no_sync` context manager**' ) test_noop_sync(a__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('**Test Distributed `no_sync` context manager**' ) test_distributed_sync(a__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation, ' , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(a__ , a__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('<' , '2.0' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , '`split_batches=False`, `dispatch_batches=False`**' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '**Test `accumulate` gradient accumulation with optimizer and scheduler, ' , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(a__ , a__ ) def UpperCAmelCase ( a__ ): '''simple docstring''' main() if __name__ == "__main__": main()
553
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowercase : Any = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Dict = [ 'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST', 'UniSpeechForCTC', 'UniSpeechForPreTraining', 'UniSpeechForSequenceClassification', 'UniSpeechModel', 'UniSpeechPreTrainedModel', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys _lowercase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
397
"""simple docstring""" # Logistic Regression from scratch # In[62]: # In[63]: # importing all the required libraries import numpy as np from matplotlib import pyplot as plt from sklearn import datasets def lowercase__ ( snake_case_ :Tuple ): return 1 / (1 + np.exp(-z )) def lowercase__ ( snake_case_ :List[Any] , snake_case_ :Dict ): return (-y * np.log(snake_case_ ) - (1 - y) * np.log(1 - h )).mean() def lowercase__ ( snake_case_ :List[Any] , snake_case_ :Dict , snake_case_ :Dict ): __UpperCAmelCase = np.dot(snake_case_ , snake_case_ ) return np.sum(y * scores - np.log(1 + np.exp(snake_case_ ) ) ) def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Optional[Any] , snake_case_ :List[Any] , snake_case_ :str=70_000 ): __UpperCAmelCase = np.zeros(x.shape[1] ) for iterations in range(snake_case_ ): __UpperCAmelCase = np.dot(snake_case_ , snake_case_ ) __UpperCAmelCase = sigmoid_function(snake_case_ ) __UpperCAmelCase = np.dot(x.T , h - y ) / y.size __UpperCAmelCase = theta - alpha * gradient # updating the weights __UpperCAmelCase = np.dot(snake_case_ , snake_case_ ) __UpperCAmelCase = sigmoid_function(snake_case_ ) __UpperCAmelCase = cost_function(snake_case_ , snake_case_ ) if iterations % 100 == 0: print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations return theta # In[68]: if __name__ == "__main__": _lowercase : Dict = datasets.load_iris() _lowercase : Dict = iris.data[:, :2] _lowercase : List[Any] = (iris.target != 0) * 1 _lowercase : Union[str, Any] = 0.1 _lowercase : Optional[int] = logistic_reg(alpha, x, y, max_iterations=7_00_00) print('theta: ', theta) # printing the theta i.e our weights vector def lowercase__ ( snake_case_ :List[Any] ): return sigmoid_function( np.dot(snake_case_ , snake_case_ ) ) # predicting the value of probability from the logistic regression algorithm plt.figure(figsize=(10, 6)) plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0') plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', 
label='1') ((_lowercase) ,(_lowercase)) : str = (x[:, 0].min(), x[:, 0].max()) ((_lowercase) ,(_lowercase)) : Tuple = (x[:, 1].min(), x[:, 1].max()) ((_lowercase) ,(_lowercase)) : Any = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max)) _lowercase : List[str] = np.c_[xxa.ravel(), xxa.ravel()] _lowercase : Dict = predict_prob(grid).reshape(xxa.shape) plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black') plt.legend() plt.show()
397
1
"""Dataset and model utilities for the MM-IMDB multimodal classification example."""

import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset

# Maps `num_image_embeds` to the (h, w) output grid of the adaptive pooling layer.
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    """ResNet backbone that turns an image into N pooled 2048-d embeddings."""

    def __init__(self, args):
        super().__init__()
        # NOTE(review): the obfuscated original called `torchvision.models.resnetaaa`
        # (nonexistent); resnet152 matches the 2048-d feature size documented below.
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]  # drop avgpool + fc
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3xHxW -> Bx2048xhxw -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    """MM-IMDB jsonl dataset yielding tokenized text, image tensor and multi-hot label."""

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        # Split off the special tokens so they can frame the image embeddings.
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        # Multi-hot label vector over the genre vocabulary.
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    """Pad sentences to the batch max length, build the attention mask, stack tensors."""
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
329
"""Adaptive softmax with projections (Transformer-XL output layer)."""

import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive log-softmax with optional low-rank output projections.

    The vocabulary is split by `cutoffs` into a frequent "head" shortlist plus
    tail clusters. A tail token's log-probability is its cluster's
    log-probability (taken from the head softmax) plus its log-probability
    within that cluster's softmax.
    """

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            # Extra "cluster" rows appended to the head softmax.
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            # Single shared output layer whose weight rows are sliced per cluster.
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                # Embedding width shrinks by div_val per cluster.
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        """Return log-probs over the vocab, or per-token NLL when `labels` is given."""
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            # Plain (non-adaptive) softmax.
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    # Head also predicts the tail-cluster pseudo-tokens.
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out

    def log_prob(self, hidden):
        """Full (N, n_token) log-probability matrix for every row of `hidden`."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases per cluster
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, l_idx:r_idx] = logprob_i
            return out
329
1
'''simple docstring''' a__ = tuple[float, float, float] a__ = tuple[float, float, float] def lowercase ( SCREAMING_SNAKE_CASE__ : Pointad , SCREAMING_SNAKE_CASE__ : Pointad ) -> Vectorad: _snake_case : Tuple = end_pointa[0] - end_pointa[0] _snake_case : Dict = end_pointa[1] - end_pointa[1] _snake_case : List[str] = end_pointa[2] - end_pointa[2] return (x, y, z) def lowercase ( SCREAMING_SNAKE_CASE__ : Vectorad , SCREAMING_SNAKE_CASE__ : Vectorad ) -> Vectorad: _snake_case : List[Any] = ab[1] * ac[2] - ab[2] * ac[1] # *i _snake_case : Any = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _snake_case : int = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def lowercase ( SCREAMING_SNAKE_CASE__ : Vectorad , SCREAMING_SNAKE_CASE__ : int ) -> bool: return tuple(round(_lowercase , _lowercase ) for x in vector ) == (0, 0, 0) def lowercase ( SCREAMING_SNAKE_CASE__ : Pointad , SCREAMING_SNAKE_CASE__ : Pointad , SCREAMING_SNAKE_CASE__ : Pointad , SCREAMING_SNAKE_CASE__ : int = 10 ) -> bool: _snake_case : str = create_vector(_lowercase , _lowercase ) _snake_case : List[Any] = create_vector(_lowercase , _lowercase ) return is_zero_vector(get_ad_vectors_cross(_lowercase , _lowercase ) , _lowercase )
709
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, # U and V such that every edge (u, v) either connects a vertex from U to V or a vertex # from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, # or u belongs to V and v to U. We can also say that there is no edge that connects # vertices of same set. def lowercase ( SCREAMING_SNAKE_CASE__ : Dict ) -> Union[str, Any]: _snake_case : str = [False] * len(SCREAMING_SNAKE_CASE__ ) _snake_case : Dict = [-1] * len(SCREAMING_SNAKE_CASE__ ) def dfs(SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ): _snake_case : Optional[Any] = True _snake_case : Tuple = c for u in graph[v]: if not visited[u]: dfs(SCREAMING_SNAKE_CASE__ , 1 - c ) for i in range(len(SCREAMING_SNAKE_CASE__ ) ): if not visited[i]: dfs(SCREAMING_SNAKE_CASE__ , 0 ) for i in range(len(SCREAMING_SNAKE_CASE__ ) ): for j in graph[i]: if color[i] == color[j]: return False return True # Adjacency list of graph a__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []} print(check_bipartite_dfs(graph))
198
0
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __A : Any = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__) class __lowerCAmelCase ( _UpperCamelCase): '''simple docstring''' __magic_name__ : bool = field(default=_UpperCamelCase , metadata={"""help""": """Whether to use SortishSampler or not."""}) __magic_name__ : bool = field( default=_UpperCamelCase , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""}) __magic_name__ : Optional[int] = field( default=_UpperCamelCase , metadata={ """help""": ( """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `max_length` value of the model configuration.""" ) } , ) __magic_name__ : Optional[int] = field( default=_UpperCamelCase , metadata={ """help""": ( """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """ """to the `num_beams` value of the model configuration.""" ) } , ) __magic_name__ : Optional[Union[str, Path, GenerationConfig]] = field( default=_UpperCamelCase , metadata={ """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.""" } , ) def _UpperCAmelCase ( self : int ): A__ : Any =super().to_dict() for k, v in d.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): A__ : Any =v.to_dict() return d
656
"""simple docstring""" import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Tuple =[] for _ in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowercase ( UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any]=10 ): """simple docstring""" A__ : Dict =[] for step in range(UpperCamelCase ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: A__ : List[Any] =os.path.join(UpperCamelCase , "schedule.bin" ) torch.save(scheduler.state_dict() , UpperCamelCase ) A__ : Dict =torch.load(UpperCamelCase ) scheduler.load_state_dict(UpperCamelCase ) return lrs @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ ) def _UpperCAmelCase ( self : Tuple ): A__ : Any =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Optional[Any] =torch.tensor([0.4, 0.2, -0.5] ) A__ : Any =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : List[str] =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 ) for _ in range(100 ): A__ : 
Optional[int] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) def _UpperCAmelCase ( self : Dict ): A__ : Optional[int] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ ) A__ : Dict =torch.tensor([0.4, 0.2, -0.5] ) A__ : Optional[int] =nn.MSELoss() # No warmup, constant schedule, no gradient clipping A__ : int =Adafactor( params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , ) for _ in range(1000 ): A__ : List[Any] =criterion(UpperCamelCase__ , UpperCamelCase__ ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 ) @require_torch class __lowerCAmelCase ( unittest.TestCase): '''simple docstring''' __magic_name__ : Optional[int] = nn.Linear(50 , 50) if is_torch_available() else None __magic_name__ : Any = AdamW(m.parameters() , lr=10.0) if is_torch_available() else None __magic_name__ : Union[str, Any] = 10 def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None ): self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ): self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ , msg=UpperCamelCase__ ) def _UpperCAmelCase ( self : Optional[Any] ): A__ : Union[str, Any] ={"num_warmup_steps": 2, "num_training_steps": 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) A__ : Union[str, Any] ={ 
get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {"num_warmup_steps": 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, "num_cycles": 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, "power": 2.0, "lr_end": 1E-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {"num_warmup_steps": 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): A__ , A__ : Any =data A__ : Union[str, Any] =scheduler_func(self.optimizer , **UpperCamelCase__ ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) A__ : int =unwrap_schedule(UpperCamelCase__ , self.num_steps ) self.assertListAlmostEqual( UpperCamelCase__ , UpperCamelCase__ , tol=1E-2 , msg=F'''failed for {scheduler_func} in normal scheduler''' , ) A__ : List[str] =scheduler_func(self.optimizer , **UpperCamelCase__ ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(UpperCamelCase__ ) # wrap to test picklability of the schedule A__ : Tuple =unwrap_and_save_reload_schedule(UpperCamelCase__ , self.num_steps ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ , msg=F'''failed for {scheduler_func} in save and reload''' ) class __lowerCAmelCase : '''simple docstring''' def __init__( self : int , UpperCamelCase__ : str ): A__ : int =fn def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ): return self.fn(*UpperCamelCase__ , **UpperCamelCase__ ) @classmethod def _UpperCAmelCase ( self : Dict , 
UpperCamelCase__ : Dict ): A__ : str =list(map(self , scheduler.lr_lambdas ) )
656
1
'''simple docstring''' import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( """files""" , [ ["""full:README.md""", """dataset_infos.json"""], ["""empty:README.md""", """dataset_infos.json"""], ["""dataset_infos.json"""], ["""full:README.md"""], ] , ) def __A ( lowerCAmelCase_ , lowerCAmelCase_ ): _UpperCAmelCase : Dict = tmp_path_factory.mktemp("""dset_infos_dir""" ) if "full:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""---\ndataset_info:\n dataset_size: 42\n---""" ) if "empty:README.md" in files: with open(dataset_infos_dir / """README.md""" , """w""" ) as f: f.write("""""" ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / """dataset_infos.json""" , """w""" ) as f: f.write("""{\"default\": {\"dataset_size\": 42}}""" ) _UpperCAmelCase : List[str] = DatasetInfosDict.from_directory(lowerCAmelCase_ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( """dataset_info""" , [ DatasetInfo(), DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ), ] , ) def __A ( lowerCAmelCase_ , lowerCAmelCase_ ): _UpperCAmelCase : Optional[Any] = str(lowerCAmelCase_ ) dataset_info.write_to_directory(lowerCAmelCase_ ) _UpperCAmelCase : Optional[Any] = DatasetInfo.from_directory(lowerCAmelCase_ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(lowerCAmelCase_ , """dataset_info.json""" ) ) def __A ( ): _UpperCAmelCase : List[str] = DatasetInfo( description="""foo""" , citation="""bar""" , homepage="""https://foo.bar""" , license="""CC0""" , features=Features({"""a""": Value("""int32""" )} ) , 
post_processed={} , supervised_keys=() , task_templates=[] , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train""", """num_examples""": 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) _UpperCAmelCase : int = dataset_info._to_yaml_dict() assert sorted(lowerCAmelCase_ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _UpperCAmelCase : List[Any] = yaml.safe_dump(lowerCAmelCase_ ) _UpperCAmelCase : List[str] = yaml.safe_load(lowerCAmelCase_ ) assert dataset_info_yaml_dict == reloaded def __A ( ): _UpperCAmelCase : List[Any] = DatasetInfo() _UpperCAmelCase : Union[str, Any] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( """dataset_infos_dict""" , [ DatasetInfosDict(), DatasetInfosDict({"""default""": DatasetInfo()} ), DatasetInfosDict({"""my_config_name""": DatasetInfo()} ), DatasetInfosDict( { """default""": DatasetInfo( description="""foo""" , features=Features({"""a""": Value("""int32""" )} ) , builder_name="""builder""" , config_name="""config""" , version="""1.0.0""" , splits=[{"""name""": """train"""}] , download_size=42 , ) } ), DatasetInfosDict( { """v1""": DatasetInfo(dataset_size=42 ), """v2""": DatasetInfo(dataset_size=1337 ), } ), ] , ) def __A ( lowerCAmelCase_ , lowerCAmelCase_ ): _UpperCAmelCase : Dict = str(lowerCAmelCase_ ) dataset_infos_dict.write_to_directory(lowerCAmelCase_ ) _UpperCAmelCase : Union[str, Any] = DatasetInfosDict.from_directory(lowerCAmelCase_ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCAmelCase : Dict = config_name # the yaml representation doesn't include fields like description or citation # so we 
just test that we can recover what we can from the yaml _UpperCAmelCase : List[Any] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(lowerCAmelCase_ , """README.md""" ) )
156
'''simple docstring''' import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class __lowerCAmelCase ( unittest.TestCase ): snake_case : int = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING snake_case : str = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase : Tuple = AudioClassificationPipeline(model=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ ) # test with a raw waveform _UpperCAmelCase : Any = np.zeros((3_4_0_0_0,) ) _UpperCAmelCase : Optional[Any] = np.zeros((1_4_0_0_0,) ) return audio_classifier, [audioa, audio] def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ ): _UpperCAmelCase , _UpperCAmelCase : str = examples _UpperCAmelCase : Union[str, Any] = audio_classifier(lowerCAmelCase__ ) # by default a model is initialized with num_labels=2 self.assertEqual( lowerCAmelCase__ , [ {"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )}, {"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )}, ] , ) _UpperCAmelCase : int = audio_classifier(lowerCAmelCase__ , top_k=1 ) self.assertEqual( lowerCAmelCase__ , [ {"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )}, ] , ) self.run_torchaudio(lowerCAmelCase__ ) @require_torchaudio def snake_case_ (self , lowerCAmelCase__ ): import datasets # test with a local file _UpperCAmelCase : Optional[int] = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) _UpperCAmelCase : List[str] = dataset[0]["""audio"""]["""array"""] _UpperCAmelCase : Union[str, Any] = 
audio_classifier(lowerCAmelCase__ ) self.assertEqual( lowerCAmelCase__ , [ {"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )}, {"""score""": ANY(lowerCAmelCase__ ), """label""": ANY(lowerCAmelCase__ )}, ] , ) @require_torch def snake_case_ (self ): _UpperCAmelCase : int = """anton-l/wav2vec2-random-tiny-classifier""" _UpperCAmelCase : List[Any] = pipeline("""audio-classification""" , model=lowerCAmelCase__ ) _UpperCAmelCase : str = np.ones((8_0_0_0,) ) _UpperCAmelCase : Tuple = audio_classifier(lowerCAmelCase__ , top_k=4 ) _UpperCAmelCase : List[Any] = [ {"""score""": 0.0_8_4_2, """label""": """no"""}, {"""score""": 0.0_8_3_8, """label""": """up"""}, {"""score""": 0.0_8_3_7, """label""": """go"""}, {"""score""": 0.0_8_3_4, """label""": """right"""}, ] _UpperCAmelCase : Any = [ {"""score""": 0.0_8_4_5, """label""": """stop"""}, {"""score""": 0.0_8_4_4, """label""": """on"""}, {"""score""": 0.0_8_4_1, """label""": """right"""}, {"""score""": 0.0_8_3_4, """label""": """left"""}, ] self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) _UpperCAmelCase : Any = {"""array""": np.ones((8_0_0_0,) ), """sampling_rate""": audio_classifier.feature_extractor.sampling_rate} _UpperCAmelCase : List[Any] = audio_classifier(lowerCAmelCase__ , top_k=4 ) self.assertIn(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def snake_case_ (self ): import datasets _UpperCAmelCase : int = """superb/wav2vec2-base-superb-ks""" _UpperCAmelCase : List[Any] = pipeline("""audio-classification""" , model=lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = datasets.load_dataset("""anton-l/superb_dummy""" , """ks""" , split="""test""" ) _UpperCAmelCase : Union[str, Any] = np.array(dataset[3]["""speech"""] , dtype=np.floataa ) _UpperCAmelCase : Optional[int] = audio_classifier(lowerCAmelCase__ , top_k=4 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , 
decimals=3 ) , [ {"""score""": 0.9_8_1, """label""": """go"""}, {"""score""": 0.0_0_7, """label""": """up"""}, {"""score""": 0.0_0_6, """label""": """_unknown_"""}, {"""score""": 0.0_0_1, """label""": """down"""}, ] , ) @require_tf @unittest.skip("""Audio classification is not implemented for TF""" ) def snake_case_ (self ): pass
156
1
from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _UpperCamelCase (a__ :Tuple , a__ :Dict ): """simple docstring""" UpperCamelCase__ = k_size // 2 UpperCamelCase__ , UpperCamelCase__ = mgrid[0 - center : k_size - center, 0 - center : k_size - center] UpperCamelCase__ = 1 / (2 * pi * sigma) * exp(-(square(a__ ) + square(a__ )) / (2 * square(a__ )) ) return g def _UpperCamelCase (a__ :Dict , a__ :Optional[int] , a__ :Dict ): """simple docstring""" UpperCamelCase__ , UpperCamelCase__ = image.shape[0], image.shape[1] # dst image height and width UpperCamelCase__ = height - k_size + 1 UpperCamelCase__ = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows UpperCamelCase__ = zeros((dst_height * dst_width, k_size * k_size) ) UpperCamelCase__ = 0 for i, j in product(range(a__ ) , range(a__ ) ): UpperCamelCase__ = ravel(image[i : i + k_size, j : j + k_size] ) UpperCamelCase__ = window row += 1 # turn the kernel into shape(k*k, 1) UpperCamelCase__ = gen_gaussian_kernel(a__ , a__ ) UpperCamelCase__ = ravel(a__ ) # reshape and get the dst image UpperCamelCase__ = dot(a__ , a__ ).reshape(a__ , a__ ).astype(a__ ) return dst if __name__ == "__main__": # read original image UpperCamelCase__ = imread(r"../image_data/lena.jpg") # turn image in gray scale value UpperCamelCase__ = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size UpperCamelCase__ = gaussian_filter(gray, 3, sigma=1) UpperCamelCase__ = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow("gaussian filter with 3x3 mask", gaussianaxa) imshow("gaussian filter with 5x5 mask", gaussianaxa) waitKey()
619
from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class __SCREAMING_SNAKE_CASE ( _a , _a ): snake_case : int = """pixel_values""" snake_case : List[Any] = False snake_case : str = TimmBackboneConfig def __init__( self , __lowerCAmelCase , **__lowerCAmelCase ): requires_backends(self , """timm""" ) super().__init__(__lowerCAmelCase ) UpperCamelCase__ = config if config.backbone is None: raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" ) if config.backbone not in timm.list_models(): raise ValueError(f"""backbone {config.backbone} is not supported by timm.""" ) if hasattr(__lowerCAmelCase , """out_features""" ) and config.out_features is not None: raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" ) UpperCamelCase__ = getattr(__lowerCAmelCase , """use_pretrained_backbone""" , __lowerCAmelCase ) if pretrained is None: raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" ) # We just take the final layer by default. This matches the default for the transformers models. UpperCamelCase__ = config.out_indices if getattr(__lowerCAmelCase , """out_indices""" , __lowerCAmelCase ) is not None else (-1,) UpperCamelCase__ = timm.create_model( config.backbone , pretrained=__lowerCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=__lowerCAmelCase , **__lowerCAmelCase , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
UpperCamelCase__ = self._backbone.return_layers UpperCamelCase__ = {layer["""module"""]: str(__lowerCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(__lowerCAmelCase ) @classmethod def _lowerCamelCase ( cls , __lowerCAmelCase , *__lowerCAmelCase , **__lowerCAmelCase ): requires_backends(cls , ["""vision""", """timm"""] ) from ...models.timm_backbone import TimmBackboneConfig UpperCamelCase__ = kwargs.pop("""config""" , TimmBackboneConfig() ) UpperCamelCase__ = kwargs.pop("""use_timm_backbone""" , __lowerCAmelCase ) if not use_timm: raise ValueError("""use_timm_backbone must be True for timm backbones""" ) UpperCamelCase__ = kwargs.pop("""num_channels""" , config.num_channels ) UpperCamelCase__ = kwargs.pop("""features_only""" , config.features_only ) UpperCamelCase__ = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone ) UpperCamelCase__ = kwargs.pop("""out_indices""" , config.out_indices ) UpperCamelCase__ = TimmBackboneConfig( backbone=__lowerCAmelCase , num_channels=__lowerCAmelCase , features_only=__lowerCAmelCase , use_pretrained_backbone=__lowerCAmelCase , out_indices=__lowerCAmelCase , ) return super()._from_config(__lowerCAmelCase , **__lowerCAmelCase ) def _lowerCamelCase ( self , __lowerCAmelCase ): pass def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ): UpperCamelCase__ = return_dict if return_dict is not None else self.config.use_return_dict UpperCamelCase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCamelCase__ = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("""Cannot output attentions for timm backbones at the moment""" ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone UpperCamelCase__ = self._all_layers 
UpperCamelCase__ = self._backbone(__lowerCAmelCase , **__lowerCAmelCase ) UpperCamelCase__ = self._return_layers UpperCamelCase__ = tuple(hidden_states[i] for i in self.out_indices ) else: UpperCamelCase__ = self._backbone(__lowerCAmelCase , **__lowerCAmelCase ) UpperCamelCase__ = None UpperCamelCase__ = tuple(__lowerCAmelCase ) UpperCamelCase__ = tuple(__lowerCAmelCase ) if hidden_states is not None else None if not return_dict: UpperCamelCase__ = (feature_maps,) if output_hidden_states: UpperCamelCase__ = output + (hidden_states,) return output return BackboneOutput(feature_maps=__lowerCAmelCase , hidden_states=__lowerCAmelCase , attentions=__lowerCAmelCase )
619
1
"""simple docstring""" import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__) def _lowerCamelCase( a , a , a , a=None , a=None ): # Recurse if needed if "." in tensor_name: __a = tensor_name.split("." ) for split in splits[:-1]: __a = getattr(a , a ) if new_module is None: raise ValueError(F"{module} has no attribute {split}." ) __a = new_module __a = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F"{module} does not have a parameter or a buffer named {tensor_name}." ) __a = tensor_name in module._buffers __a = getattr(a , a ) if old_value.device == torch.device("meta" ) and device not in ["meta", torch.device("meta" )] and value is None: raise ValueError(F"{tensor_name} is on the meta device, we need a `value` to put in on {device}." 
) __a = False __a = False if is_buffer or not is_bitsandbytes_available(): __a = False __a = False else: __a = hasattr(bnb.nn , "Params4bit" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) __a = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: __a = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: __a = old_value.to(a ) elif isinstance(a , torch.Tensor ): __a = value.to("cpu" ) if value.dtype == torch.inta: __a = version.parse(importlib.metadata.version("bitsandbytes" ) ) > version.parse( "0.37.2" ) if not is_abit_serializable: raise ValueError( "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. " "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`." ) else: __a = torch.tensor(a , device="cpu" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , a ) and fpaa_statistics is None: __a = new_value.T __a = old_value.__dict__ if is_abit: __a = bnb.nn.IntaParams(a , requires_grad=a , **a ).to(a ) elif is_abit: __a = bnb.nn.Paramsabit(a , requires_grad=a , **a ).to(a ) __a = new_value if fpaa_statistics is not None: setattr(module.weight , "SCB" , fpaa_statistics.to(a ) ) else: if value is None: __a = old_value.to(a ) elif isinstance(a , torch.Tensor ): __a = value.to(a ) else: __a = torch.tensor(a , device=a ) if is_buffer: __a = new_value else: __a = nn.Parameter(a , requires_grad=old_value.requires_grad ) __a = new_value def _lowerCamelCase( a , a=None , a=None , a=None , a=False ): for name, module in model.named_children(): if current_key_name is None: __a = [] current_key_name.append(a ) if (isinstance(a , nn.Linear ) or isinstance(a , a )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in ".".join(a ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(a , a ): __a , __a = module.weight.shape else: __a = module.in_features __a = module.out_features if quantization_config.quantization_method() == "llm_int8": __a = bnb.nn.LinearabitLt( a , a , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) __a = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: __a = bnb.nn.Linearabit( a , a , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) __a = True # Store the module class in case we need to transpose the weight later __a = type(a ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(a ) if len(list(module.children() ) ) > 0: 
__a , __a = _replace_with_bnb_linear( a , a , a , a , has_been_replaced=a , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _lowerCamelCase( a , a=None , a=None , a=None ): __a = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert __a , __a = _replace_with_bnb_linear( a , a , a , a ) if not has_been_replaced: logger.warning( "You are loading your model in 8bit or 4bit but no linear modules were found in your model." " Please double check your model architecture, or submit an issue on github if you think this is" " a bug." ) return model def _lowerCamelCase( *a , **a ): warnings.warn( "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead" , a , ) return replace_with_bnb_linear(*a , **a ) def _lowerCamelCase( *a , **a ): warnings.warn( "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead" , a , ) return set_module_quantized_tensor_to_device(*a , **a ) def _lowerCamelCase( a ): __a = deepcopy(a ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() __a = find_tied_parameters(a ) # For compatibility with Accelerate < 0.18 if isinstance(a , a ): __a = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __a = sum(a , [] ) __a = len(a ) > 0 # Check if it is a base model __a = not hasattr(a , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) 
if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __a = list(model.named_children() ) __a = [list_modules[-1][0]] # add last module together with tied weights __a = set(a ) - set(a ) __a = list(set(a ) ) + list(a ) # remove ".weight" from the keys __a = [".weight", ".bias"] __a = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __a = name.replace(a , "" ) filtered_module_names.append(a ) return filtered_module_names
67
"""simple docstring""" from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
67
1
def _snake_case (_snake_case : int) -> int: _lowercase =0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def _snake_case (_snake_case : int = 100) -> int: _lowercase =1 _lowercase =2 for i in range(2 , max_n + 1): _lowercase =pre_numerator _lowercase =2 * i // 3 if i % 3 == 0 else 1 _lowercase =cur_numerator _lowercase =e_cont * pre_numerator + temp return sum_digits(_snake_case) if __name__ == "__main__": print(f'''{solution() = }''')
181
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self :List[str]): """simple docstring""" _lowercase =tempfile.mkdtemp() _lowercase =BlipImageProcessor() _lowercase =GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model') _lowercase =BertTokenizerFast.from_pretrained('hf-internal-testing/tiny-random-bert') _lowercase =InstructBlipProcessor(snake_case, snake_case, snake_case) processor.save_pretrained(self.tmpdirname) def UpperCamelCase__ ( self :List[str], **snake_case :str): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname, **snake_case).tokenizer def UpperCamelCase__ ( self :Optional[Any], **snake_case :List[Any]): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname, **snake_case).image_processor def UpperCamelCase__ ( self :Tuple, **snake_case :Any): """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname, **snake_case).qformer_tokenizer def UpperCamelCase__ ( self :Optional[int]): """simple docstring""" shutil.rmtree(self.tmpdirname) def UpperCamelCase__ ( self :List[str]): """simple docstring""" _lowercase =[np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)] _lowercase =[Image.fromarray(np.moveaxis(snake_case, 0, -1)) for x in image_inputs] return image_inputs def UpperCamelCase__ ( self :List[Any]): """simple docstring""" _lowercase =InstructBlipProcessor( tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer(), ) processor.save_pretrained(self.tmpdirname) 
_lowercase =self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)') _lowercase =self.get_image_processor(do_normalize=snake_case, padding_value=1.0) _lowercase =InstructBlipProcessor.from_pretrained( self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=snake_case, padding_value=1.0) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, snake_case) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, snake_case) self.assertIsInstance(processor.qformer_tokenizer, snake_case) def UpperCamelCase__ ( self :Tuple): """simple docstring""" _lowercase =self.get_image_processor() _lowercase =self.get_tokenizer() _lowercase =self.get_qformer_tokenizer() _lowercase =InstructBlipProcessor( tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case) _lowercase =self.prepare_image_inputs() _lowercase =image_processor(snake_case, return_tensors='np') _lowercase =processor(images=snake_case, return_tensors='np') for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def UpperCamelCase__ ( self :List[Any]): """simple docstring""" _lowercase =self.get_image_processor() _lowercase =self.get_tokenizer() _lowercase =self.get_qformer_tokenizer() _lowercase =InstructBlipProcessor( tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case) _lowercase ='lower newer' _lowercase =processor(text=snake_case) _lowercase =tokenizer(snake_case, return_token_type_ids=snake_case) _lowercase =qformer_tokenizer(snake_case, return_token_type_ids=snake_case) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key], encoded_processor[key]) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key], encoded_processor['qformer_' + key]) def 
UpperCamelCase__ ( self :Any): """simple docstring""" _lowercase =self.get_image_processor() _lowercase =self.get_tokenizer() _lowercase =self.get_qformer_tokenizer() _lowercase =InstructBlipProcessor( tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case) _lowercase ='lower newer' _lowercase =self.prepare_image_inputs() _lowercase =processor(text=snake_case, images=snake_case) self.assertListEqual( list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], ) # test if it raises when no input is passed with pytest.raises(snake_case): processor() def UpperCamelCase__ ( self :Dict): """simple docstring""" _lowercase =self.get_image_processor() _lowercase =self.get_tokenizer() _lowercase =self.get_qformer_tokenizer() _lowercase =InstructBlipProcessor( tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case) _lowercase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _lowercase =processor.batch_decode(snake_case) _lowercase =tokenizer.batch_decode(snake_case) self.assertListEqual(snake_case, snake_case) def UpperCamelCase__ ( self :int): """simple docstring""" _lowercase =self.get_image_processor() _lowercase =self.get_tokenizer() _lowercase =self.get_qformer_tokenizer() _lowercase =InstructBlipProcessor( tokenizer=snake_case, image_processor=snake_case, qformer_tokenizer=snake_case) _lowercase ='lower newer' _lowercase =self.prepare_image_inputs() _lowercase =processor(text=snake_case, images=snake_case) self.assertListEqual( list(inputs.keys()), ['input_ids', 'attention_mask', 'qformer_input_ids', 'qformer_attention_mask', 'pixel_values'], )
181
1
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCAmelCase = { 'configuration_informer': [ 'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = [ 'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'InformerForPrediction', 'InformerModel', 'InformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
717
"""simple docstring""" import inspect import unittest from transformers import DPTConfig from transformers.file_utils import is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class _SCREAMING_SNAKE_CASE : def __init__( self , __A , __A=2 , __A=32 , __A=16 , __A=3 , __A=True , __A=True , __A=32 , __A=4 , __A=[0, 1, 2, 3] , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=0.0_2 , __A=3 , __A=[1, 384, 24, 24] , __A=True , __A=None , ) -> Optional[Any]: lowerCAmelCase_ :Optional[Any] = parent lowerCAmelCase_ :Optional[int] = batch_size lowerCAmelCase_ :Dict = image_size lowerCAmelCase_ :Tuple = patch_size lowerCAmelCase_ :Union[str, Any] = num_channels lowerCAmelCase_ :Tuple = is_training lowerCAmelCase_ :Dict = use_labels lowerCAmelCase_ :Union[str, Any] = hidden_size lowerCAmelCase_ :Union[str, Any] = num_hidden_layers lowerCAmelCase_ :List[Any] = backbone_out_indices lowerCAmelCase_ :Optional[int] = num_attention_heads lowerCAmelCase_ :List[str] = intermediate_size lowerCAmelCase_ :Any = hidden_act lowerCAmelCase_ :str = hidden_dropout_prob lowerCAmelCase_ :Tuple = attention_probs_dropout_prob lowerCAmelCase_ :List[Any] = initializer_range lowerCAmelCase_ :Optional[int] = num_labels lowerCAmelCase_ :List[Any] = backbone_featmap_shape lowerCAmelCase_ :List[Any] = scope lowerCAmelCase_ :Tuple = is_hybrid # 
sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase_ :Optional[int] = (image_size // patch_size) ** 2 lowerCAmelCase_ :Optional[Any] = num_patches + 1 def __lowerCAmelCase ( self ) -> Optional[Any]: lowerCAmelCase_ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase_ :List[str] = None if self.use_labels: lowerCAmelCase_ :Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase_ :List[str] = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Dict = { """global_padding""": """same""", """layer_type""": """bottleneck""", """depths""": [3, 4, 9], """out_features""": ["""stage1""", """stage2""", """stage3"""], """embedding_dynamic_padding""": True, """hidden_sizes""": [96, 192, 384, 768], """num_groups""": 2, } return DPTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__A , backbone_featmap_shape=self.backbone_featmap_shape , ) def __lowerCAmelCase ( self , __A , __A , __A ) -> List[Any]: lowerCAmelCase_ :Dict = DPTModel(config=__A ) model.to(__A ) model.eval() lowerCAmelCase_ :List[str] = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , __A , __A , __A ) -> List[str]: lowerCAmelCase_ :Union[str, Any] = self.num_labels lowerCAmelCase_ :Optional[Any] = 
DPTForDepthEstimation(__A ) model.to(__A ) model.eval() lowerCAmelCase_ :Dict = model(__A ) self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) ) def __lowerCAmelCase ( self , __A , __A , __A ) -> List[Any]: lowerCAmelCase_ :int = self.num_labels lowerCAmelCase_ :Optional[int] = DPTForSemanticSegmentation(__A ) model.to(__A ) model.eval() lowerCAmelCase_ :Union[str, Any] = model(__A , labels=__A ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Optional[Any] = self.prepare_config_and_inputs() lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = config_and_inputs lowerCAmelCase_ :str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): UpperCAmelCase_ :str = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else () UpperCAmelCase_ :Union[str, Any] = ( { "depth-estimation": DPTForDepthEstimation, "feature-extraction": DPTModel, "image-segmentation": DPTForSemanticSegmentation, } if is_torch_available() else {} ) UpperCAmelCase_ :Optional[Any] = False UpperCAmelCase_ :List[str] = False UpperCAmelCase_ :Dict = False def __lowerCAmelCase ( self ) -> List[Any]: lowerCAmelCase_ :List[str] = DPTModelTester(self ) lowerCAmelCase_ :List[str] = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 ) def __lowerCAmelCase ( self ) -> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason="""DPT does not use inputs_embeds""" ) def __lowerCAmelCase ( self ) -> List[str]: pass def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ :Dict = model_class(__A ) 
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCAmelCase_ :Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , nn.Linear ) ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase_ :Union[str, Any] = model_class(__A ) lowerCAmelCase_ :int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase_ :int = [*signature.parameters.keys()] lowerCAmelCase_ :int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def __lowerCAmelCase ( self ) -> str: lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_depth_estimation(*__A ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__A ) def __lowerCAmelCase ( self ) -> List[str]: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowerCAmelCase_ , lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ :Any = True if model_class in get_values(__A ): continue lowerCAmelCase_ :Union[str, Any] = model_class(__A ) model.to(__A ) model.train() lowerCAmelCase_ :Optional[int] = self._prepare_for_class(__A , __A , return_labels=__A ) lowerCAmelCase_ :Dict = model(**__A ).loss loss.backward() def __lowerCAmelCase ( self ) -> Any: for model_class in self.all_model_classes: if model_class.__name__ == "DPTForDepthEstimation": continue lowerCAmelCase_ , lowerCAmelCase_ :str = 
self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ :Dict = False lowerCAmelCase_ :Optional[Any] = True if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing: continue lowerCAmelCase_ :Optional[Any] = model_class(__A ) model.to(__A ) model.gradient_checkpointing_enable() model.train() lowerCAmelCase_ :Any = self._prepare_for_class(__A , __A , return_labels=__A ) lowerCAmelCase_ :Dict = model(**__A ).loss loss.backward() def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ , lowerCAmelCase_ :Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ :int = _config_zero_init(__A ) for model_class in self.all_model_classes: lowerCAmelCase_ :str = model_class(config=__A ) # Skip the check for the backbone lowerCAmelCase_ :List[Any] = [] for name, module in model.named_modules(): if module.__class__.__name__ == "DPTViTHybridEmbeddings": lowerCAmelCase_ :Tuple = [f"""{name}.{key}""" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def __lowerCAmelCase ( self ) -> List[str]: pass @slow def __lowerCAmelCase ( self ) -> List[str]: for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]: lowerCAmelCase_ :List[Any] = DPTModel.from_pretrained(__A ) self.assertIsNotNone(__A ) def __lowerCAmelCase ( self ) -> int: # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ :List[Any] = """add""" with self.assertRaises(__A ): lowerCAmelCase_ :Optional[Any] = DPTForDepthEstimation(__A 
) def _snake_case ( ) -> int: '''simple docstring''' lowerCAmelCase_ :int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision @slow class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> int: lowerCAmelCase_ :List[str] = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" ) lowerCAmelCase_ :str = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__A ) lowerCAmelCase_ :Any = prepare_img() lowerCAmelCase_ :str = image_processor(images=__A , return_tensors="""pt""" ).to(__A ) # forward pass with torch.no_grad(): lowerCAmelCase_ :Union[str, Any] = model(**__A ) lowerCAmelCase_ :str = outputs.predicted_depth # verify the predicted depth lowerCAmelCase_ :Tuple = torch.Size((1, 384, 384) ) self.assertEqual(predicted_depth.shape , __A ) lowerCAmelCase_ :int = torch.tensor( [[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__A ) self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __A , atol=1E-4 ) )
256
0
import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class _a ( unittest.TestCase ): def lowerCamelCase_ ( self: Union[str, Any] ) -> Union[str, Any]: """simple docstring""" super().tearDown() gc.collect() def lowerCamelCase_ ( self: Optional[Any] ) -> Optional[Any]: """simple docstring""" lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , ) lowercase__ = '''A painting of a squirrel eating a burger''' lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = sd_pipe.prepare_inputs(UpperCamelCase_ ) lowercase__ = replicate(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() ) lowercase__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowercase__ = images[0, 253:256, 253:256, -1] lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]: """simple docstring""" lowercase__ = '''stabilityai/stable-diffusion-2''' lowercase__ , lowercase__ = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCamelCase_ , subfolder='''scheduler''' ) 
lowercase__ , lowercase__ = FlaxStableDiffusionPipeline.from_pretrained( UpperCamelCase_ , scheduler=UpperCamelCase_ , revision='''bf16''' , dtype=jnp.bfloataa , ) lowercase__ = scheduler_params lowercase__ = '''A painting of a squirrel eating a burger''' lowercase__ = jax.device_count() lowercase__ = num_samples * [prompt] lowercase__ = sd_pipe.prepare_inputs(UpperCamelCase_ ) lowercase__ = replicate(UpperCamelCase_ ) lowercase__ = shard(UpperCamelCase_ ) lowercase__ = jax.random.PRNGKey(0 ) lowercase__ = jax.random.split(UpperCamelCase_ , jax.device_count() ) lowercase__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) lowercase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowercase__ = images[0, 253:256, 253:256, -1] lowercase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
43
"""simple docstring""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract lowerCAmelCase__ = logging.get_logger(__name__) def _lowerCamelCase ( __a, __a, __a ): return [ int(1_000 * (box[0] / width) ), int(1_000 * (box[1] / height) ), int(1_000 * (box[2] / width) ), int(1_000 * (box[3] / height) ), ] def _lowerCamelCase ( __a, __a, __a ): SCREAMING_SNAKE_CASE_ = to_pil_image(__a ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pil_image.size SCREAMING_SNAKE_CASE_ = pytesseract.image_to_data(__a, lang=__a, output_type='''dict''', config=__a ) SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height'''] # filter empty words and corresponding coordinates SCREAMING_SNAKE_CASE_ = [idx for idx, word in enumerate(__a ) if not word.strip()] SCREAMING_SNAKE_CASE_ = [word for idx, word in enumerate(__a ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] SCREAMING_SNAKE_CASE_ = [coord for idx, coord in enumerate(__a ) if idx not in irrelevant_indices] # turn coordinates 
into (left, top, left+width, top+height) format SCREAMING_SNAKE_CASE_ = [] for x, y, w, h in zip(__a, __a, __a, __a ): SCREAMING_SNAKE_CASE_ = [x, y, x + w, y + h] actual_boxes.append(__a ) # finally, normalize the bounding boxes SCREAMING_SNAKE_CASE_ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(__a, __a, __a ) ) assert len(__a ) == len(__a ), "Not as many words as there are bounding boxes" return words, normalized_boxes class snake_case ( __lowercase ): UpperCAmelCase__ = ['''pixel_values'''] def __init__(self , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = 1 / 2_55 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "" , **SCREAMING_SNAKE_CASE_ , ): """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = size if size is not None else {'''height''': 2_24, '''width''': 2_24} SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = do_resize SCREAMING_SNAKE_CASE_ = size SCREAMING_SNAKE_CASE_ = resample SCREAMING_SNAKE_CASE_ = do_rescale SCREAMING_SNAKE_CASE_ = rescale_value SCREAMING_SNAKE_CASE_ = do_normalize SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else IMAGENET_STANDARD_STD SCREAMING_SNAKE_CASE_ = apply_ocr SCREAMING_SNAKE_CASE_ = ocr_lang SCREAMING_SNAKE_CASE_ = tesseract_config def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ ) if "height" not in size or "width" not in size: raise ValueError(f'The size 
dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' ) SCREAMING_SNAKE_CASE_ = (size['''height'''], size['''width''']) return resize(SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): """simple docstring""" return rescale(SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , **SCREAMING_SNAKE_CASE_ , ): """simple docstring""" return normalize(SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ , data_format=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE_ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize SCREAMING_SNAKE_CASE_ = size if size is not None else self.size SCREAMING_SNAKE_CASE_ = get_size_dict(SCREAMING_SNAKE_CASE_ ) SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample SCREAMING_SNAKE_CASE_ = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE_ = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize 
SCREAMING_SNAKE_CASE_ = image_mean if image_mean is not None else self.image_mean SCREAMING_SNAKE_CASE_ = image_std if image_std is not None else self.image_std SCREAMING_SNAKE_CASE_ = apply_ocr if apply_ocr is not None else self.apply_ocr SCREAMING_SNAKE_CASE_ = ocr_lang if ocr_lang is not None else self.ocr_lang SCREAMING_SNAKE_CASE_ = tesseract_config if tesseract_config is not None else self.tesseract_config SCREAMING_SNAKE_CASE_ = make_list_of_images(SCREAMING_SNAKE_CASE_ ) if not valid_images(SCREAMING_SNAKE_CASE_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' ) # All transformations expect numpy arrays. 
SCREAMING_SNAKE_CASE_ = [to_numpy_array(SCREAMING_SNAKE_CASE_ ) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self , '''pytesseract''' ) SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = [] for image in images: SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = apply_tesseract(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) words_batch.append(SCREAMING_SNAKE_CASE_ ) boxes_batch.append(SCREAMING_SNAKE_CASE_ ) if do_resize: SCREAMING_SNAKE_CASE_ = [self.resize(image=SCREAMING_SNAKE_CASE_ , size=SCREAMING_SNAKE_CASE_ , resample=SCREAMING_SNAKE_CASE_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE_ = [self.rescale(image=SCREAMING_SNAKE_CASE_ , scale=SCREAMING_SNAKE_CASE_ ) for image in images] if do_normalize: SCREAMING_SNAKE_CASE_ = [self.normalize(image=SCREAMING_SNAKE_CASE_ , mean=SCREAMING_SNAKE_CASE_ , std=SCREAMING_SNAKE_CASE_ ) for image in images] SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for image in images] SCREAMING_SNAKE_CASE_ = BatchFeature(data={'''pixel_values''': images} , tensor_type=SCREAMING_SNAKE_CASE_ ) if apply_ocr: SCREAMING_SNAKE_CASE_ = words_batch SCREAMING_SNAKE_CASE_ = boxes_batch return data
626
0
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", 
"""logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" 
using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoints and their hosted config files.
LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    """Configuration class for a LiLT model.

    Stores the hyper-parameters used to instantiate the text and layout
    streams; defaults mirror the ``SCUT-DLVCLab/lilt-roberta-en-base``
    checkpoint.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # ratio by which the layout (2D) channel is shrunk relative to the text channel
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
76
1
from __future__ import annotations lowercase : Tuple = "Muhammad Umer Farooq" lowercase : Union[str, Any] = "MIT" lowercase : int = "1.0.0" lowercase : str = "Muhammad Umer Farooq" lowercase : List[Any] = "contact@muhammadumerfarooq.me" lowercase : Optional[int] = "Alpha" import re from html.parser import HTMLParser from urllib import parse import requests class a__ ( __UpperCAmelCase ): def __init__( self : Any , A_ : str ) -> List[Any]: """simple docstring""" super().__init__() lowerCamelCase_: Dict = [] lowerCamelCase_: Union[str, Any] = domain def lowerCAmelCase ( self : int , A_ : str , A_ : list[tuple[str, str | None]] ) -> List[Any]: """simple docstring""" if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: lowerCamelCase_: Any = parse.urljoin(self.domain , lowerCAmelCase_ ) self.urls.append(lowerCAmelCase_ ) def UpperCAmelCase_ ( _UpperCAmelCase ): return ".".join(get_sub_domain_name(__lowerCAmelCase ).split(""".""" )[-2:] ) def UpperCAmelCase_ ( _UpperCAmelCase ): return parse.urlparse(__lowerCAmelCase ).netloc def UpperCAmelCase_ ( _UpperCAmelCase = "https://github.com" ): lowerCamelCase_: Dict = get_domain_name(__lowerCAmelCase ) # Initialize the parser lowerCamelCase_: Optional[int] = Parser(__lowerCAmelCase ) try: # Open URL lowerCamelCase_: List[Any] = requests.get(__lowerCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through lowerCamelCase_: str = set() for link in parser.urls: # open URL. # read = requests.get(link) try: lowerCamelCase_: Dict = requests.get(__lowerCAmelCase ) # Get the valid email. lowerCamelCase_: List[str] = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text ) # If not in list then append it. 
for email in emails: valid_emails.add(__lowerCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(__lowerCAmelCase ) if __name__ == "__main__": lowercase : Tuple = emails_from_url("""https://github.com""") print(F"{len(emails)} emails found:") print("""\n""".join(sorted(emails)))
423
"""simple docstring""" import sys import turtle def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase ) -> tuple[float, float]: '''simple docstring''' return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> None: '''simple docstring''' my_pen.up() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) my_pen.goto(vertexa[0] , vertexa[1] ) if depth == 0: return triangle(__lowerCAmelCase , get_mid(__lowerCAmelCase , __lowerCAmelCase ) , get_mid(__lowerCAmelCase , __lowerCAmelCase ) , depth - 1 ) triangle(__lowerCAmelCase , get_mid(__lowerCAmelCase , __lowerCAmelCase ) , get_mid(__lowerCAmelCase , __lowerCAmelCase ) , depth - 1 ) triangle(__lowerCAmelCase , get_mid(__lowerCAmelCase , __lowerCAmelCase ) , get_mid(__lowerCAmelCase , __lowerCAmelCase ) , depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( "Correct format for using this script: " "python fractals.py <int:depth_for_fractal>" ) UpperCAmelCase : Optional[Any] = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor("red") UpperCAmelCase : str = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
567
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical DPR checkpoints and their hosted config files.
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}


class DPRConfig(PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) encoder/reader models.

    Defaults mirror a BERT-base backbone; ``projection_dim`` of 0 means no
    extra projection on top of the pooled output.
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
707
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"
        ),
        "google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt",
        "google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt",
        "google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt",
    },
    "tokenizer_file": {
        "google/realm-cc-news-pretrained-embedder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"
        ),
        "google/realm-cc-news-pretrained-encoder": (
            "https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-scorer": (
            "https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"
        ),
        "google/realm-cc-news-pretrained-openqa": (
            "https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-openqa": (
            "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-nq-reader": (
            "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-openqa": (
            "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"
        ),
        "google/realm-orqa-wq-reader": (
            "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/realm-cc-news-pretrained-embedder": 512,
    "google/realm-cc-news-pretrained-encoder": 512,
    "google/realm-cc-news-pretrained-scorer": 512,
    "google/realm-cc-news-pretrained-openqa": 512,
    "google/realm-orqa-nq-openqa": 512,
    "google/realm-orqa-nq-reader": 512,
    "google/realm-orqa-wq-openqa": 512,
    "google/realm-orqa-wq-reader": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "google/realm-cc-news-pretrained-embedder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-encoder": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-scorer": {"do_lower_case": True},
    "google/realm-cc-news-pretrained-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-openqa": {"do_lower_case": True},
    "google/realm-orqa-nq-reader": {"do_lower_case": True},
    "google/realm-orqa-wq-openqa": {"do_lower_case": True},
    "google/realm-orqa-wq-reader": {"do_lower_case": True},
}


class RealmTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) REALM tokenizer, a WordPiece/BERT-style tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved state disagrees with the
        # options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        """Encode a batch of candidate texts, padding each to max length.

        Tensorization is deferred until all candidates are encoded so that
        candidates of different lengths can be stacked.
        """
        # Always use max-length padding so candidates stack into one tensor.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        # Drop keys the backend tokenizer did not produce.
        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_pair=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` input sequences."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]

        if token_ids_a_pair:
            output += token_ids_a_pair + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ):
        """Return token type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_pair + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Persist the backend tokenizer's vocabulary files to *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
453
0
"""Convert InstructBLIP checkpoints from the LAVIS repository to HF format."""
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    TaConfig,
    TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    """Download and return the demo image used to sanity-check the conversion."""
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    """Return (old_name, new_name) pairs mapping LAVIS weight names to HF names."""
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    """Move the entry *old* in *dct* to key *new* (in place)."""
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    """Merge the separate q/v biases into a single qkv bias per vision layer.

    LAVIS stores q and v biases (k has none); HF expects one concatenated
    qkv bias with zeros for k.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blipa_config(model_name):
    """Build the InstructBlipConfig and image size for the given model name."""
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size


@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an InstructBLIP checkpoint, verify logits against the original
    LAVIS model, and optionally save/push the converted model."""
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = TaTokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blipa_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "instructblip-vicuna-7b",
        "instructblip-vicuna-13b",
        "instructblip-flan-t5-xl",
        "instructblip-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="instructblip-flan-t5-xl",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
"""Helpers for saving/loading model and optimizer state with PyTorch FSDP."""
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    """Save an FSDP-wrapped model's state dict to *output_dir*.

    The on-disk layout depends on ``fsdp_plugin.state_dict_type``:
    FULL (rank 0 writes one file), LOCAL (one file per rank) or SHARDED
    (a distributed-checkpoint directory).
    """
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            # only the main process holds the full (consolidated) state dict
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}

            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    """Load a model state dict saved by :func:`save_fsdp_model` into *model*."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    """Save the optimizer state of an FSDP model to *output_dir*."""
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    """Load optimizer state saved by :func:`save_fsdp_optimizer` into *optimizer*."""
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly opytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        # NOTE(review): argument order matches accelerate's usage of
        # FSDP.optim_state_dict_to_load for this PyTorch version — confirm
        # against the installed torch release.
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
531
1
"""Convert an integer to a Python-style binary string (e.g. 5 -> "0b101")."""


def A__(A: int) -> str:
    """Return the binary representation of integer ``A`` as ``"0b..."``.

    Negative inputs yield a ``"-0b..."`` prefix. Floats and strings are
    rejected explicitly, mirroring ``bin()``'s error messages.

    Raises:
        TypeError: if ``A`` is a float or a str.
    """
    # The original dump checked `isinstance(A, A)`, which always raises
    # "isinstance() arg 2 must be a type" — restore the intended type guards.
    if isinstance(A, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(A, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num_is_zero := (A == 0):
        return "0b0"
    negative = False
    num = A
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        # Collect bits least-significant first, inserting at the front.
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
707
'''simple docstring''' from __future__ import annotations import math def A__ ( A : int): '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A) + 1) , 6): if number % i == 0 or number % (i + 2) == 0: return False return True lowerCAmelCase_ = [num for num in range(3, 10_0001, 2) if not is_prime(num)] def A__ ( A : int): '''simple docstring''' if not isinstance(A , A): raise ValueError("n must be an integer") if n <= 0: raise ValueError("n must be >= 0") UpperCamelCase : Union[str, Any] = [] for num in range(len(A)): UpperCamelCase : Any = 0 while 2 * i * i <= odd_composites[num]: UpperCamelCase : str = odd_composites[num] - 2 * i * i if is_prime(A): break i += 1 else: list_nums.append(odd_composites[num]) if len(A) == n: return list_nums return [] def A__ ( ): '''simple docstring''' return compute_nums(1)[0] if __name__ == "__main__": print(f"""{solution() = }""")
435
0
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed

# NOTE(review): the dump defined every function as `a` with duplicate `A__`
# parameters (SyntaxError) and `_A` placeholders in the bodies; names are
# restored from the calls the bodies themselves make (step_model,
# check_model_parameters, get_training_setup, main).


def check_model_parameters(model_a, model_b, did_step, iteration):
    """Assert gradient (de)synchronization between two models' parameters."""
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    """Run one forward/backward step, via accelerator or plain autograd."""
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    """Build a reference model, a DDP-prepared copy, a dataloader, and (optionally) optimizers + schedulers."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        scheduler = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, scheduler, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    """`no_sync` must be a noop outside distributed mode: grads always in sync."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    """In distributed mode, `no_sync` must delay gradient synchronization."""
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    """`accelerator.accumulate` must sync grads only every N steps or at the end."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    """Gradient accumulation with optimizer + LR scheduler: LRs must stay aligned."""
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    """`gradient_state.active_dataloader` must track nested dataloader iteration."""
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    """Dispatch the sync/accumulation tests appropriate for the current distributed setup."""
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # Entry point for TPU/XLA spawned processes.
    main()


if __name__ == "__main__":
    main()
291
from unittest import TestCase

from datasets import Dataset

from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters


def get_dataset():
    """Build a tiny 3-row dataset where the first two rows are near-duplicates."""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    # NOTE(review): the dump's base class `__UpperCAmelCase` and colliding method
    # names are restored to the imported TestCase and unique `test_*` names so
    # unittest discovery actually runs both tests.

    def test_make_duplicate_clusters(self):
        """The two near-duplicate rows must land in one cluster of size 2."""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        """Deduplication keeps 2 rows and flags the duplicate as extreme."""
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
431
0
"""TensorFlow activation functions and the ACT2FN-style lookup used by TF models."""
import math

import tensorflow as tf
from packaging import version

# NOTE(review): every def in the dump was named `lowerCamelCase`; names are
# restored from the references in the ACTaFN dict and the version-gated
# assignments below (_gelu, _gelu_new, gelu_aa, approximate_gelu_wrap, ...).


def _gelu(x):
    """Exact GELU: x * Phi(x) computed via erf."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU (the "new" GPT-2 variant)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Fast tanh-based GELU approximation."""
    x = tf.convert_to_tensor(x)
    coeffa = tf.cast(0.044715, x.dtype)
    coeffa_sqrt = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeffa_sqrt * (1.0 + coeffa * x * x)))


def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_aa(x):
    """GELU clipped to [-10, 10] (the "gelu_10" activation)."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two along `axis`, gate with sigmoid."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        # Keras' built-in gelu supports the tanh approximation from TF 2.4.
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new


ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    """Look up an activation function by name.

    Raises:
        KeyError: if `activation_string` is not a known activation.
    """
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys())}")
713
"""Project Euler 30: sum of all numbers equal to the sum of the fifth powers of their digits."""

# Precomputed fifth power of each decimal digit, keyed by its character.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers in [1000, 1000000) equal to their digit fifth-power sum.

    The lower bound excludes trivial one-digit fixed points; 6 * 9^5 < 1000000
    bounds the search space above.
    """
    return sum(number for number in range(1000, 1000000) if number == digits_fifth_powers_sum(number))


if __name__ == "__main__":
    print(solution())
475
0
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

# NOTE(review): the dump assigned all module constants to `_a` and collapsed every
# method name to `__lowerCamelCase`; identifiers are restored to the names the
# bodies reference and to the PreTrainedTokenizer interface the class must expose.

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}


@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from byte values to printable unicode characters.

    Byte-level BPE needs every byte mapped to a character that survives
    round-tripping; printable bytes map to themselves, the rest to 256+n.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class BlenderbotTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for Blenderbot (GPT-2/RoBERTa style)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        """Number of tokens in the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as a dict."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to `token` and return the space-joined symbol string."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (earliest learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize `text` into BPE sub-word strings."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token string to its vocabulary id (unk on miss)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert a vocabulary id back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and decode the byte-level mapping back to text."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`; return their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask marking special-token positions for (pairs of) sequences."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return all-zero token type ids (Blenderbot does not use segment ids)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so leading words BPE-merge consistently."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """Append EOS to a sequence; Blenderbot ignores any second sequence."""
        return token_ids_a + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into model input ids, truncating from the left."""
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
481
# Lazy-import structure for the (deprecated) M-CTC-T model.
#
# Heavy sub-modules (notably the torch-dependent modeling file) are only
# imported when one of their public names is actually accessed, via
# ``_LazyModule``.

from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Mapping: sub-module name -> list of public names it provides.
_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

# The modeling file requires torch; register it only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import (
            MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MCTCTForCTC,
            MCTCTModel,
            MCTCTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
496
0
"""Tests for the GLPN image processor."""

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import GLPNImageProcessor


class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters and builds the kwargs used to instantiate
    ``GLPNImageProcessor`` in the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        # Keyword arguments forwarded to the GLPNImageProcessor constructor.
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        # output height/width must be multiples of size_divisor
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
715
"""Centralized logging utilities: a library-wide root logger, verbosity
helpers, and a tqdm wrapper that can be globally disabled."""

import functools
import logging
import os
import sys
import threading
from logging import (
    CRITICAL,  # NOQA
    DEBUG,  # NOQA
    ERROR,  # NOQA
    FATAL,  # NOQA
    INFO,  # NOQA
    NOTSET,  # NOQA
    WARN,  # NOQA
    WARNING,  # NOQA
)
from typing import Optional

import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib


# Guards lazy configuration of the library root logger.
_lock = threading.Lock()
# The single StreamHandler installed on the library root logger (None until
# _configure_library_root_logger runs).
_default_handler: Optional[logging.Handler] = None

log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING

# Whether tqdm progress bars are globally enabled.
_tqdm_active = True


def _get_default_logging_level():
    """Return the default level, honoring the TRANSFORMERS_VERBOSITY env var.

    Falls back to ``_default_log_level`` when the variable is unset or invalid.
    """
    env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    # Top-level package name, e.g. "transformers".
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    """Install the default stderr handler on the library root logger (once)."""
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    """Undo ``_configure_library_root_logger``: drop the handler, reset level."""
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None


def get_log_levels_dict():
    """Return the mapping of level names to ``logging`` level constants."""
    return log_levels


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name (library root logger if None)."""
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name)


def get_verbosity() -> int:
    """Return the effective level of the library root logger as an int."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger."""
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    """Set verbosity to ``INFO``."""
    return set_verbosity(INFO)


def set_verbosity_warning():
    """Set verbosity to ``WARNING``."""
    return set_verbosity(WARNING)


def set_verbosity_debug():
    """Set verbosity to ``DEBUG``."""
    return set_verbosity(DEBUG)


def set_verbosity_error():
    """Set verbosity to ``ERROR``."""
    return set_verbosity(ERROR)


def disable_default_handler() -> None:
    """Remove the default handler from the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)


def enable_default_handler() -> None:
    """Re-attach the default handler to the library root logger."""
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)


def add_handler(handler: logging.Handler) -> None:
    """Add a custom handler to the library root logger."""
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler)


def remove_handler(handler: logging.Handler) -> None:
    """Remove the given handler from the library root logger."""
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)


def disable_propagation() -> None:
    """Stop log records from propagating to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    """Allow log records to propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    """Switch every handler to an explicit [level|file:line] timestamped format."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s")
        handler.setFormatter(formatter)


def reset_format() -> None:
    """Reset every handler back to the default (no) formatter."""
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None)


def warning_advice(self, *args, **kwargs):
    """Like ``Logger.warning``, but silenced when
    TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Emit a warning only once per unique (self, args, kwargs) combination."""
    self.warning(*args, **kwargs)


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    """Dummy tqdm that swallows every call (used when progress bars are off)."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function for any attribute access."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


class _tqdm_cls:
    """Factory that yields real tqdm bars or ``EmptyTqdm`` depending on
    the global ``_tqdm_active`` flag."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are currently enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Globally enable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    """Globally disable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
605
0
"""Tests for the PyTorch LeViT model."""

import inspect
import unittest
import warnings
from math import ceil, floor

from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_MAPPING,
        LevitForImageClassification,
        LevitForImageClassificationWithTeacher,
        LevitModel,
    )
    from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LevitImageProcessor


class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        # LeViT configs expose per-stage sizes/heads rather than a flat hidden_size.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))


class LevitModelTester:
    """Builds tiny LeViT configs/inputs and checks model outputs for the tests."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 6, 8],
        depths=[2, 3, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        # Down-sampling ("Subsample") operations between the three stages.
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return LevitConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            kernel_size=self.kernel_size,
            stride=self.stride,
            padding=self.padding,
            patch_size=self.patch_size,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            depths=self.depths,
            key_dim=self.key_dim,
            drop_path_rate=self.drop_path_rate,
            mlp_ratio=self.mlp_ratio,
            attention_ratio=self.attention_ratio,
            initializer_range=self.initializer_range,
            down_ops=self.down_ops,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = LevitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        # The patch embedding applies 4 stride-2 convolutions.
        for _ in range(4):
            height = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1)
            width = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, ceil(height / 4) * ceil(width / 4), self.hidden_sizes[-1]),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = LevitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class LevitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LevitModel,
            "image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = LevitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LevitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Levit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Levit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Levit does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depths) + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            image_size = (self.model_tester.image_size, self.model_tester.image_size)
            height, width = image_size[0], image_size[1]
            for _ in range(4):
                height = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
                width = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1
                )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            # The teacher variant is inference-only and takes no labels.
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}'):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()

    @slow
    def test_model_from_pretrained(self):
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LevitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class LevitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([1.0448, -0.3745, -1.8317]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
143
"""MGP-STR (scene-text recognition) model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    """Configuration class for an MGP-STR model.

    Stores the ViT-style backbone hyper-parameters plus the sizes of the three
    recognition heads (character / BPE / wordpiece vocabularies).
    """

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        # NOTE(review): the obfuscated source spelled this attribute
        # "output_aa_attentions"; restored here as the A^3-module attention flag —
        # confirm the exact upstream spelling before relying on it.
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
192
0
"""simple docstring""" def a__ ( __lowercase ) -> tuple[int, int]: try: _A = float(__lowercase ) except ValueError: raise ValueError("Please enter a valid number" ) _A = decimal - int(__lowercase ) if fractional_part == 0: return int(__lowercase ), 1 else: _A = len(str(__lowercase ).split("." )[1] ) _A = int(decimal * (10**number_of_frac_digits) ) _A = 10**number_of_frac_digits _A , _A = denominator, numerator while True: _A = dividend % divisor if remainder == 0: break _A , _A = divisor, remainder _A , _A = numerator / divisor, denominator / divisor return int(__lowercase ), int(__lowercase ) if __name__ == "__main__": print(f'''{decimal_to_fraction(2) = }''') print(f'''{decimal_to_fraction(89.0) = }''') print(f'''{decimal_to_fraction("67") = }''') print(f'''{decimal_to_fraction("45.0") = }''') print(f'''{decimal_to_fraction(1.5) = }''') print(f'''{decimal_to_fraction("6.25") = }''') print(f'''{decimal_to_fraction("78td") = }''')
621
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) a_ = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( 
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''', f'''decoder.layers.{i}.encoder_attn.out_proj.weight''', ) ) rename_keys.append( ( f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''', f'''decoder.layers.{i}.encoder_attn.out_proj.bias''', ) ) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''')) rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''') ) rename_keys.append( 
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''') ) rename_keys.append( (f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', 
f'''decoder.layers.{i}.ca_qcontent_proj.bias''') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''') ) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias''')) rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias''')) rename_keys.append( (f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("input_proj.weight", "input_projection.weight"), ("input_proj.bias", "input_projection.bias"), ("query_embed.weight", "query_position_embeddings.weight"), ("transformer.decoder.norm.weight", "decoder.layernorm.weight"), ("transformer.decoder.norm.bias", "decoder.layernorm.bias"), ("class_embed.weight", "class_labels_classifier.weight"), ("class_embed.bias", "class_labels_classifier.bias"), ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"), ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"), ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"), ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"), ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"), ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"), ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"), ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"), ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"), ("transformer.decoder.ref_point_head.layers.1.bias", 
"decoder.ref_point_head.layers.1.bias"), ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"), ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"), ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"), ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"), ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"), ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"), ] ) def a__ ( __lowercase , __lowercase , __lowercase ) -> List[str]: _A = state_dict.pop(__lowercase ) _A = val def a__ ( __lowercase ) -> List[str]: _A = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: _A = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) _A = value else: _A = value return new_state_dict def a__ ( __lowercase , __lowercase=False ) -> Any: _A = "" if is_panoptic: _A = "conditional_detr." 
# first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) _A = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _A = in_proj_weight[:256, :] _A = in_proj_bias[:256] _A = in_proj_weight[256:512, :] _A = in_proj_bias[256:512] _A = in_proj_weight[-256:, :] _A = in_proj_bias[-256:] def a__ ( ) -> int: _A = "http://images.cocodataset.org/val2017/000000039769.jpg" _A = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ) return im @torch.no_grad() def a__ ( __lowercase , __lowercase ) -> Any: _A = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: _A = "resnet101" if "dc5" in model_name: _A = True _A = "panoptic" in model_name if is_panoptic: _A = 250 else: _A = 91 _A = "huggingface/label-files" _A = "coco-detection-id2label.json" _A = json.load(open(hf_hub_download(__lowercase , __lowercase , repo_type="dataset" ) , "r" ) ) _A = {int(__lowercase ): v for k, v in idalabel.items()} _A = idalabel _A = {v: k for k, v in idalabel.items()} # load image processor _A = "coco_panoptic" if is_panoptic else "coco_detection" _A = ConditionalDetrImageProcessor(format=__lowercase ) # prepare image _A = prepare_img() _A = image_processor(images=__lowercase , return_tensors="pt" ) _A = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub _A = torch.hub.load("DeppMeng/ConditionalDETR" , __lowercase , pretrained=__lowercase ).eval() _A = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: _A = "conditional_detr." 
+ src rename_key(__lowercase , __lowercase , __lowercase ) _A = rename_backbone_keys(__lowercase ) # query, key and value matrices need special treatment read_in_q_k_v(__lowercase , is_panoptic=__lowercase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them _A = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): _A = state_dict.pop(__lowercase ) _A = val elif "class_labels_classifier" in key or "bbox_predictor" in key: _A = state_dict.pop(__lowercase ) _A = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: _A = state_dict.pop(__lowercase ) _A = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): _A = state_dict.pop(__lowercase ) _A = val # finally, create HuggingFace model and load state dict _A = ConditionalDetrForSegmentation(__lowercase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowercase ) model.load_state_dict(__lowercase ) model.eval() model.push_to_hub(repo_id=__lowercase , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion _A = conditional_detr(__lowercase ) _A = model(__lowercase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(__lowercase ).mkdir(exist_ok=__lowercase ) model.save_pretrained(__lowercase ) image_processor.save_pretrained(__lowercase ) if __name__ == "__main__": a_ = 
argparse.ArgumentParser() parser.add_argument( "--model_name", default="conditional_detr_resnet50", type=str, help="Name of the CONDITIONAL_DETR model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) a_ = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
621
1
import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : """simple docstring""" def __init__( self , a_ , a_=13 , a_=7 , a_=True , a_=True , a_=False , a_=True , a_=99 , a_=32 , a_=5 , a_=4 , a_=37 , a_="gelu" , a_=0.1 , a_=0.1 , a_=512 , a_=16 , a_=2 , a_=0.02 , a_=3 , a_=4 , a_=None , ): lowerCamelCase_ : Union[str, Any] = parent lowerCamelCase_ : List[str] = batch_size lowerCamelCase_ : int = seq_length lowerCamelCase_ : int = is_training lowerCamelCase_ : Optional[int] = use_input_mask lowerCamelCase_ : List[str] = use_token_type_ids lowerCamelCase_ : Dict = use_labels lowerCamelCase_ : int = vocab_size lowerCamelCase_ : Tuple = hidden_size lowerCamelCase_ : str = num_hidden_layers lowerCamelCase_ : Optional[int] = num_attention_heads lowerCamelCase_ : List[Any] = intermediate_size lowerCamelCase_ : str = hidden_act lowerCamelCase_ : Dict = hidden_dropout_prob lowerCamelCase_ : List[Any] = attention_probs_dropout_prob lowerCamelCase_ : List[str] = max_position_embeddings lowerCamelCase_ : int = type_vocab_size lowerCamelCase_ : Dict = type_sequence_label_size lowerCamelCase_ : int = initializer_range lowerCamelCase_ : Union[str, Any] = num_labels lowerCamelCase_ : str = num_choices lowerCamelCase_ : str = scope def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , 
self.vocab_size ) lowerCamelCase_ : Optional[Any] = None if self.use_input_mask: lowerCamelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ : List[Any] = None if self.use_token_type_ids: lowerCamelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ : int = None lowerCamelCase_ : Optional[Any] = None lowerCamelCase_ : List[Any] = None if self.use_labels: lowerCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCamelCase ( self ): return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ ): lowerCamelCase_ : int = BioGptModel(config=a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : List[str] = model(a_ , attention_mask=a_ ) lowerCamelCase_ : Union[str, Any] = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): lowerCamelCase_ : Optional[int] = BioGptForCausalLM(config=a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : List[Any] = model(a_ , 
attention_mask=a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , *a_ ): lowerCamelCase_ : List[Any] = BioGptModel(config=a_ ) model.to(a_ ) model.eval() # create attention mask lowerCamelCase_ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=a_ ) lowerCamelCase_ : str = self.seq_length // 2 lowerCamelCase_ : Any = 0 # first forward pass lowerCamelCase_ ,lowerCamelCase_ : Any = model(a_ , attention_mask=a_ ).to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase_ : str = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids lowerCamelCase_ : int = ids_tensor((1,) , a_ ).item() + 1 lowerCamelCase_ : Any = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) lowerCamelCase_ : Optional[int] = random_other_next_tokens # append to next input_ids and attn_mask lowerCamelCase_ : str = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase_ : List[str] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=a_ )] , dim=1 , ) # get two different outputs lowerCamelCase_ : Tuple = model(a_ , attention_mask=a_ )["last_hidden_state"] lowerCamelCase_ : Tuple = model(a_ , past_key_values=a_ , attention_mask=a_ )["last_hidden_state"] # select random slice lowerCamelCase_ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase_ : int = output_from_no_past[:, -1, random_slice_idx].detach() lowerCamelCase_ : Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1E-3 ) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , *a_ ): lowerCamelCase_ : int = BioGptModel(config=a_ ).to(a_ ).eval() lowerCamelCase_ : int = torch.ones(input_ids.shape , dtype=torch.long , 
device=a_ ) # first forward pass lowerCamelCase_ : Union[str, Any] = model(a_ , attention_mask=a_ , use_cache=a_ ) lowerCamelCase_ ,lowerCamelCase_ : Any = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids lowerCamelCase_ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase_ : str = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and lowerCamelCase_ : Any = torch.cat([input_ids, next_tokens] , dim=-1 ) lowerCamelCase_ : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) lowerCamelCase_ : Dict = model(a_ , attention_mask=a_ )["last_hidden_state"] lowerCamelCase_ : Any = model(a_ , attention_mask=a_ , past_key_values=a_ )[ "last_hidden_state" ] # select random slice lowerCamelCase_ : Any = ids_tensor((1,) , output_from_past.shape[-1] ).item() lowerCamelCase_ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowerCamelCase_ : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a_ , a_ , atol=1E-3 ) ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , *a_ , a_=False ): lowerCamelCase_ : Optional[int] = BioGptForCausalLM(a_ ) model.to(a_ ) if gradient_checkpointing: model.gradient_checkpointing_enable() lowerCamelCase_ : List[Any] = model(a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) result.loss.backward() def _UpperCamelCase ( self , a_ , *a_ ): lowerCamelCase_ : int = BioGptModel(a_ ) lowerCamelCase_ : str = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers ) for key in model.state_dict().keys(): if "c_proj" in key and "weight" in key: self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 ) 
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 ) def _UpperCamelCase ( self , a_ , a_ , a_ , a_ , a_ , *a_ ): lowerCamelCase_ : Any = self.num_labels lowerCamelCase_ : List[str] = BioGptForTokenClassification(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : Union[str, Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCamelCase ( self ): lowerCamelCase_ : Tuple = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) : int = config_and_inputs lowerCamelCase_ : Optional[int] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = ( (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification) if is_torch_available() else () ) __UpperCAmelCase : Optional[int] = (BioGptForCausalLM,) if is_torch_available() else () __UpperCAmelCase : List[str] = ( { '''feature-extraction''': BioGptModel, '''text-classification''': BioGptForSequenceClassification, '''text-generation''': BioGptForCausalLM, '''token-classification''': BioGptForTokenClassification, '''zero-shot''': BioGptForSequenceClassification, } if is_torch_available() else {} ) __UpperCAmelCase : Optional[Any] = False def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = BioGptModelTester(self ) lowerCamelCase_ : Optional[int] = ConfigTester(self , config_class=a_ , hidden_size=37 ) def _UpperCamelCase ( self ): self.config_tester.run_common_tests() def _UpperCamelCase ( self ): lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def 
_UpperCamelCase ( self ): lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase_ : str = type self.model_tester.create_and_check_model(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_attention_mask_past(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_forward_and_backwards(*a_ , gradient_checkpointing=a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_model_past_large_inputs(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_weight_initialization(*a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_biogpt_for_token_classification(*a_ ) @slow def _UpperCamelCase ( self ): lowerCamelCase_ : Optional[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(a_ ) lowerCamelCase_ : Optional[int] = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) lowerCamelCase_ : List[str] = "left" # Define PAD Token = EOS Token = 50256 lowerCamelCase_ : Optional[int] = tokenizer.eos_token lowerCamelCase_ : List[str] = model.config.eos_token_id # use different length sentences to test batching lowerCamelCase_ : str = [ "Hello, my dog is a little", "Today, I", ] lowerCamelCase_ : str = tokenizer(a_ , return_tensors="pt" , padding=a_ ) lowerCamelCase_ : Any = inputs["input_ids"].to(a_ ) lowerCamelCase_ : Dict = model.generate( input_ids=a_ , attention_mask=inputs["attention_mask"].to(a_ ) , ) lowerCamelCase_ : Union[str, Any] = tokenizer(sentences[0] , return_tensors="pt" 
).input_ids.to(a_ ) lowerCamelCase_ : List[str] = model.generate(input_ids=a_ ) lowerCamelCase_ : int = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item() lowerCamelCase_ : int = tokenizer(sentences[1] , return_tensors="pt" ).input_ids.to(a_ ) lowerCamelCase_ : List[str] = model.generate(input_ids=a_ , max_length=model.config.max_length - num_paddings ) lowerCamelCase_ : Optional[Any] = tokenizer.batch_decode(a_ , skip_special_tokens=a_ ) lowerCamelCase_ : Any = tokenizer.decode(output_non_padded[0] , skip_special_tokens=a_ ) lowerCamelCase_ : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=a_ ) lowerCamelCase_ : Dict = [ "Hello, my dog is a little bit bigger than a little bit.", "Today, I have a good idea of how to use the information", ] self.assertListEqual(a_ , a_ ) self.assertListEqual(a_ , [non_padded_sentence, padded_sentence] ) @slow def _UpperCamelCase ( self ): for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ : int = BioGptModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : List[Any] = 3 lowerCamelCase_ : List[Any] = input_dict["input_ids"] lowerCamelCase_ : Optional[Any] = input_ids.ne(1 ).to(a_ ) lowerCamelCase_ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) lowerCamelCase_ : List[Any] = BioGptForSequenceClassification(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : str = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def _UpperCamelCase ( self ): lowerCamelCase_ ,lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase_ : Any = 3 lowerCamelCase_ : List[str] = "multi_label_classification" lowerCamelCase_ : str = 
input_dict["input_ids"] lowerCamelCase_ : List[str] = input_ids.ne(1 ).to(a_ ) lowerCamelCase_ : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) lowerCamelCase_ : List[str] = BioGptForSequenceClassification(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ : Any = model(a_ , attention_mask=a_ , labels=a_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" @slow def _UpperCamelCase ( self ): lowerCamelCase_ : List[Any] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) lowerCamelCase_ : List[str] = torch.tensor([[2, 4805, 9, 656, 21]] ) lowerCamelCase_ : Optional[int] = model(a_ )[0] lowerCamelCase_ : Union[str, Any] = 4_2384 lowerCamelCase_ : Union[str, Any] = torch.Size((1, 5, vocab_size) ) self.assertEqual(output.shape , a_ ) lowerCamelCase_ : Optional[int] = torch.tensor( [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1E-4 ) ) @slow def _UpperCamelCase ( self ): lowerCamelCase_ : Any = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) lowerCamelCase_ : Optional[int] = BioGptForCausalLM.from_pretrained("microsoft/biogpt" ) model.to(a_ ) torch.manual_seed(0 ) lowerCamelCase_ : Dict = tokenizer("COVID-19 is" , return_tensors="pt" ).to(a_ ) lowerCamelCase_ : List[Any] = model.generate( **a_ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=a_ , ) lowerCamelCase_ : List[str] = tokenizer.decode(output_ids[0] , skip_special_tokens=a_ ) lowerCamelCase_ : Any = ( "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the" " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and" " territories, including the United 
States (US), Canada, Australia, New Zealand, the United Kingdom (UK)," " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and" " more than 800,000 deaths." ) self.assertEqual(a_ , a_ )
250
import warnings warnings.warn( '''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: ''' '''`from accelerate import find_executable_batch_size` to avoid this warning.''', FutureWarning, )
250
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator class __SCREAMING_SNAKE_CASE : def __init__( self : Optional[int] , snake_case : int ): '''simple docstring''' A__ : Dict = value A__ : Node | None = None A__ : Node | None = None class __SCREAMING_SNAKE_CASE : def __init__( self : List[str] , snake_case : Node ): '''simple docstring''' A__ : Tuple = tree def _UpperCamelCase ( self : List[str] , snake_case : Node | None ): '''simple docstring''' if node is None: return 0 return node.value + ( self.depth_first_search(node.left ) + self.depth_first_search(node.right ) ) def __iter__( self : List[str] ): '''simple docstring''' yield self.depth_first_search(self.tree ) if __name__ == "__main__": import doctest doctest.testmod()
498
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() A_ = logging.get_logger(__name__) A_ = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } A_ = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[str] ) ->str: for attribute in key.split(""".""" ): A__ : Any = getattr(UpperCAmelCase__, UpperCAmelCase__ ) if weight_type is not None: A__ : Union[str, Any] = getattr(UpperCAmelCase__, UpperCAmelCase__ ).shape else: A__ : Optional[int] = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": A__ : List[Any] = value elif weight_type == "weight_g": A__ : Any = value elif weight_type == "weight_v": A__ : Optional[int] = value elif weight_type == "bias": A__ : Any = value else: A__ : Any = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Any ) ->Union[str, Any]: A__ : Dict = [] A__ : int = fairseq_model.state_dict() A__ : Dict = hf_model.feature_extractor A__ : Any = hf_model.adapter for name, value in fairseq_dict.items(): A__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, hf_model.config.feat_extract_norm == """group""", ) A__ : Union[str, Any] = True elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ): load_adapter(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) A__ : int = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: A__ : List[Any] = True if "*" in mapped_key: A__ : Union[str, Any] = name.split(UpperCAmelCase__ )[0].split(""".""" )[-2] A__ : Union[str, Any] = mapped_key.replace("""*""", UpperCAmelCase__ ) if "weight_g" in name: A__ : List[str] = """weight_g""" elif "weight_v" in name: A__ : Optional[Any] = """weight_v""" elif "bias" in name: A__ : Optional[int] = """bias""" elif "weight" in name: A__ : int = """weight""" else: A__ : Optional[Any] = None set_recursively(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ ) continue if not is_used: unused_weights.append(UpperCAmelCase__ ) logger.warning(f'Unused weights: {unused_weights}' ) def _lowerCAmelCase ( UpperCAmelCase__ : int, 
UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any ) ->List[Any]: A__ : Dict = full_name.split("""conv_layers.""" )[-1] A__ : Optional[int] = name.split(""".""" ) A__ : int = int(items[0] ) A__ : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) A__ : Optional[Any] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) A__ : Optional[int] = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) A__ : Optional[int] = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) A__ : Any = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(UpperCAmelCase__ ) def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict ) ->str: A__ : Tuple = full_name.split("""adaptor.""" )[-1] A__ : Optional[int] = name.split(""".""" ) if items[1].isdigit(): A__ : Optional[Any] = int(items[1] ) else: A__ : Any = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.' A__ : Union[str, Any] = value logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.' A__ : Tuple = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.' A__ : str = value logger.info(f'Adapter proj layer bias was initialized from {full_name}.' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.' A__ : Union[str, Any] = value logger.info(f'Adapter proj layer weight was initialized from {full_name}.' ) elif isinstance(UpperCAmelCase__, UpperCAmelCase__ ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.' A__ : Dict = value logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' 
) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.' A__ : Optional[int] = value logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' ) else: unused_weights.append(UpperCAmelCase__ ) def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->List[Any]: A__ , A__ : Union[str, Any] = emb.weight.shape A__ : List[str] = nn.Linear(UpperCAmelCase__, UpperCAmelCase__, bias=UpperCAmelCase__ ) A__ : List[Any] = emb.weight.data return lin_layer @torch.no_grad() def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, ) ->str: A__ : Tuple = WavaVecaConfig.from_pretrained( UpperCAmelCase__, add_adapter=UpperCAmelCase__, adapter_stride=UpperCAmelCase__, adapter_kernel_size=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, output_hidden_size=UpperCAmelCase__, ) A__ : List[Any] = MBartConfig.from_pretrained(UpperCAmelCase__ ) # load model A__ , A__ , A__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={ """config_yaml""": config_yaml_path, """data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path, """load_pretrained_decoder_from""": None, }, ) A__ : List[Any] = model[0].eval() # load feature extractor A__ : Any = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase__, use_auth_token=UpperCAmelCase__ ) # set weights for wav2vec2 encoder A__ : Dict = WavaVecaModel(UpperCAmelCase__ ) recursively_load_weights_wavaveca(model.encoder, UpperCAmelCase__ ) # load decoder weights A__ : Any = MBartForCausalLM(UpperCAmelCase__ ) A__ , A__ : 
Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=UpperCAmelCase__ ) logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' ) logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' ) A__ : Dict = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__, decoder=UpperCAmelCase__ ) A__ : Optional[Any] = False A__ : Optional[Any] = MBartaaTokenizer(UpperCAmelCase__ ) tokenizer.save_pretrained(UpperCAmelCase__ ) A__ : Dict = hf_wavavec.config.to_dict() A__ : List[Any] = tokenizer.pad_token_id A__ : Optional[Any] = tokenizer.bos_token_id A__ : List[Any] = tokenizer.eos_token_id A__ : Tuple = """mbart50""" A__ : List[str] = """wav2vec2""" A__ : Optional[int] = tokenizer.eos_token_id A__ : int = 2_5_0_0_0_4 A__ : Dict = tokenizer.eos_token_id A__ : str = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__ ) hf_wavavec.save_pretrained(UpperCAmelCase__ ) feature_extractor.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', default='''facebook/wav2vec2-xls-r-1b''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/mbart-large-50-one-to-many-mmt''', type=str, help='''Path to hf decoder checkpoint config''', ) parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter 
layers''') parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''') parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''') parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''') parser.add_argument('''--start_token_id''', default=25_0004, type=int, help='''`decoder_start_token_id` of model config''') A_ = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
498
1
import numpy as np import datasets lowerCamelCase : List[str] = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' lowerCamelCase : List[Any] = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' lowerCamelCase : Tuple = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ ( datasets.Metric ): def UpperCAmelCase__ ( self : Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'X': datasets.Sequence(datasets.Value('float' , id='sequence' ) , id='X' ), } ) , ) def UpperCAmelCase__ ( self : Any , _lowerCamelCase : int , _lowerCamelCase : List[str] ): # convert to numpy arrays snake_case__ : Tuple = np.array(_lowerCamelCase ) snake_case__ : Optional[Any] = np.array(_lowerCamelCase ) # Assert that arrays are 2D if 
len(X.shape ) != 2: raise ValueError('Expected `X` to be a 2D vector' ) if len(reference_distribution.shape ) != 2: raise ValueError('Expected `reference_distribution` to be a 2D vector' ) if reference_distribution.shape[0] < 2: raise ValueError( 'Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension' ) # Get mahalanobis distance for each prediction snake_case__ : str = X - np.mean(_lowerCamelCase ) snake_case__ : Dict = np.cov(reference_distribution.T ) try: snake_case__ : List[Any] = np.linalg.inv(_lowerCamelCase ) except np.linalg.LinAlgError: snake_case__ : Dict = np.linalg.pinv(_lowerCamelCase ) snake_case__ : Optional[int] = np.dot(_lowerCamelCase , _lowerCamelCase ) snake_case__ : str = np.dot(_lowerCamelCase , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
170
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """Estimate pi by throwing random dots at the square [-1, 1] x [-1, 1].

    The fraction of dots landing inside the unit circle approaches pi/4.
    Prints the estimate, the reference value and the absolute error.
    """

    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0))) for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f'The estimated value of pi is {pi_estimate}')
    print(f'The numpy value of pi is {pi}')
    print(f'The total error is {abs(pi - pi_estimate)}')


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte-Carlo (mean-value) estimate of the integral of ``function_to_integrate``
    over ``[min_value, max_value]``: average of samples times interval length.
    """
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(iterations: int, min_value: float = 0.0, max_value: float = 1.0) -> None:
    """Sanity-check the estimator on y = x, whose exact area is (max^2 - min^2) / 2."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(iterations, identity_function, min_value, max_value)
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print('******************')
    print(f'Estimating area under y=x where x varies from {min_value} to {max_value}')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {expected_value}')
    print(f'Total error is {abs(estimated_value - expected_value)}')
    print('******************')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(iterations, function_to_integrate, 0.0, 2.0)
    print('******************')
    print('Estimating pi using area_under_curve_estimator')
    print(f'Estimated value is {estimated_value}')
    print(f'Expected value is {pi}')
    print(f'Total error is {abs(estimated_value - pi)}')
    print('******************')


# Backward-compatible alias: in the previous revision all four functions shared
# this name, so only the last definition was visible at module level.
lowercase__ = pi_estimator_using_area_under_curve

if __name__ == "__main__":
    import doctest

    doctest.testmod()
170
1
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    """Holds image-processor settings and builds random image fixtures for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_convert_rgb=True,
    ):
        # Avoid mutable defaults; these are the CLIP normalization constants.
        image_mean = image_mean if image_mean is not None else [0.48145466, 0.4578275, 0.40821073]
        image_std = image_std if image_std is not None else [0.26862954, 0.26130258, 0.27577711]
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        """Kwargs used to construct a ``ChineseCLIPImageProcessor``."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Create a batch of random channels-first images.

        Returns PIL images by default, numpy arrays with ``numpify=True`` or
        torch tensors with ``torchify=True`` (the two are mutually exclusive).
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            # Variable resolutions, sampled independently per image.
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Standard 3-channel (RGB) tests for ``ChineseCLIPImageProcessor``."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        # Integer overrides are normalized into the dict forms shown below.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    """4-channel (RGBA) input tests: `do_convert_rgb` collapses the output to 3 channels."""

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGBA inputs are converted to RGB, so encoded images have 3 channels.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
715
"""Power iteration for the dominant eigenpair of a symmetric / Hermitian matrix."""
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Return ``(largest eigenvalue, corresponding unit eigenvector)``.

    Iterates ``v <- A v / ||A v||`` from the starting guess *vector* until the
    Rayleigh-quotient eigenvalue estimate changes by less than *error_tol*
    (relative) or *max_iterations* is reached. Complex inputs must be Hermitian.
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]  # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence (relative change of the eigenvalue estimate).
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_

    if is_complex:
        # For a Hermitian matrix the eigenvalue is real; drop the zero imaginary part.
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    """Cross-check ``power_iteration`` against ``numpy.linalg.eigh`` on real and complex inputs."""
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    # Make the complex matrix Hermitian: add i*U above the diagonal, -i*U^T below.
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
216
0
from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ = { """configuration_informer""": [ """INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """InformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ """INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """InformerForPrediction""", """InformerModel""", """InformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys a__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
654
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int=13 , SCREAMING_SNAKE_CASE_ : Any=7 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[int]=True , SCREAMING_SNAKE_CASE_ : str=99 , SCREAMING_SNAKE_CASE_ : List[Any]=16 , SCREAMING_SNAKE_CASE_ : List[Any]=36 , SCREAMING_SNAKE_CASE_ : List[Any]=6 , SCREAMING_SNAKE_CASE_ : str=6 , SCREAMING_SNAKE_CASE_ : List[str]=6 , SCREAMING_SNAKE_CASE_ : List[Any]=37 , SCREAMING_SNAKE_CASE_ : List[str]="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Any=512 , SCREAMING_SNAKE_CASE_ : Optional[Any]=16 , SCREAMING_SNAKE_CASE_ : Tuple=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=0.0_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : Dict=4 , SCREAMING_SNAKE_CASE_ : List[Any]=None , ): lowerCamelCase__ = parent lowerCamelCase__ = batch_size lowerCamelCase__ = seq_length lowerCamelCase__ = is_training lowerCamelCase__ = use_input_mask lowerCamelCase__ = use_token_type_ids lowerCamelCase__ = use_labels lowerCamelCase__ = 
vocab_size lowerCamelCase__ = embedding_size lowerCamelCase__ = hidden_size lowerCamelCase__ = num_hidden_layers lowerCamelCase__ = num_hidden_groups lowerCamelCase__ = num_attention_heads lowerCamelCase__ = intermediate_size lowerCamelCase__ = hidden_act lowerCamelCase__ = hidden_dropout_prob lowerCamelCase__ = attention_probs_dropout_prob lowerCamelCase__ = max_position_embeddings lowerCamelCase__ = type_vocab_size lowerCamelCase__ = type_sequence_label_size lowerCamelCase__ = initializer_range lowerCamelCase__ = num_labels lowerCamelCase__ = num_choices lowerCamelCase__ = scope def __UpperCAmelCase ( self : Any ): lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase__ = None if self.use_input_mask: lowerCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase__ = None if self.use_token_type_ids: lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase__ = None lowerCamelCase__ = None lowerCamelCase__ = None if self.use_labels: lowerCamelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase__ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self : List[str] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , 
num_hidden_groups=self.num_hidden_groups , ) def __UpperCAmelCase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any ): lowerCamelCase__ = AlbertModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ ) lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCAmelCase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Tuple ): lowerCamelCase__ = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , sentence_order_label=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __UpperCAmelCase ( self : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str ): lowerCamelCase__ = 
AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ): lowerCamelCase__ = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , 
SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ): lowerCamelCase__ = self.num_labels lowerCamelCase__ = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] ): lowerCamelCase__ = self.num_choices lowerCamelCase__ = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowerCamelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase__ = model( SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self : Union[str, Any] ): lowerCamelCase__ = self.prepare_config_and_inputs() ( ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ( lowerCamelCase__ ) , ) = config_and_inputs lowerCamelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ): snake_case = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) snake_case = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) snake_case = True def __UpperCAmelCase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict=False ): lowerCamelCase__ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE_ ): lowerCamelCase__ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) lowerCamelCase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) return inputs_dict def __UpperCAmelCase ( self : Union[str, Any] ): lowerCamelCase__ = AlbertModelTester(self ) lowerCamelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=37 ) def __UpperCAmelCase ( self : Tuple ): self.config_tester.run_common_tests() def __UpperCAmelCase ( self : str ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : Union[str, Any] ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : Any ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : str ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : List[Any] ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : Optional[int] ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ ) def __UpperCAmelCase ( self : Any ): lowerCamelCase__ = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase__ = type self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) @slow def __UpperCAmelCase ( self : Optional[Any] ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase__ = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def __UpperCAmelCase ( self : List[str] ): lowerCamelCase__ = AlbertModel.from_pretrained("""albert-base-v2""" ) lowerCamelCase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) lowerCamelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowerCamelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0] lowerCamelCase__ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ ) lowerCamelCase__ = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
129
0
import os import pickle import unittest from transformers import AutoTokenizer from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.models.bert_japanese.tokenization_bert_japanese import ( VOCAB_FILES_NAMES, BertJapaneseTokenizer, CharacterTokenizer, JumanppTokenizer, MecabTokenizer, SudachiTokenizer, WordpieceTokenizer, ) from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi from ...test_tokenization_common import TokenizerTesterMixin @custom_tokenizers class __snake_case (__UpperCAmelCase , unittest.TestCase ): lowerCAmelCase__ = BertJapaneseTokenizer lowerCAmelCase__ = False lowerCAmelCase__ = True def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: '''simple docstring''' super().setUp() _lowerCAmelCase : List[str] = [ """[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは""", """世界""", """##世界""", """、""", """##、""", """。""", """##。""", ] _lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : Optional[Any] ) -> Tuple: '''simple docstring''' _lowerCAmelCase : str = """こんにちは、世界。 \nこんばんは、世界。""" _lowerCAmelCase : Tuple = """こんにちは 、 世界 。 こんばんは 、 世界 。""" return input_text, output_text def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : List[str] ) -> Optional[Any]: '''simple docstring''' _lowerCAmelCase , _lowerCAmelCase : str = self.get_input_output_texts(UpperCAmelCase_ ) _lowerCAmelCase : str = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ) _lowerCAmelCase : List[str] = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) return text, ids def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: '''simple 
docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: '''simple docstring''' _lowerCAmelCase : Dict = self.tokenizer_class(self.vocab_file ) _lowerCAmelCase : Any = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" ) self.assertListEqual(UpperCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: '''simple docstring''' _lowerCAmelCase : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" ) self.assertIsNotNone(UpperCAmelCase_ ) _lowerCAmelCase : Dict = """こんにちは、世界。\nこんばんは、世界。""" _lowerCAmelCase : Tuple = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) _lowerCAmelCase : int = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(UpperCAmelCase_ , """wb""" ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , """rb""" ) as handle: _lowerCAmelCase : List[str] = pickle.load(UpperCAmelCase_ ) _lowerCAmelCase : Union[str, Any] = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' _lowerCAmelCase : List[str] = MecabTokenizer(mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", 
"""8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' try: _lowerCAmelCase : Dict = MecabTokenizer(mecab_dic="""unidic_lite""" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: '''simple docstring''' try: _lowerCAmelCase : List[str] = MecabTokenizer(mecab_dic="""unidic""" ) except ModuleNotFoundError: return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: '''simple docstring''' _lowerCAmelCase : Any = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self : int ) -> Tuple: '''simple docstring''' try: _lowerCAmelCase : Any = MecabTokenizer( do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" ) except RuntimeError: # if dict doesn't exist in the system, previous code raises this error. 
return self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: '''simple docstring''' _lowerCAmelCase : Optional[int] = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic="""ipadic""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , ) @require_sudachi def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: '''simple docstring''' _lowerCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" ) self.assertIsNotNone(UpperCAmelCase_ ) _lowerCAmelCase : Tuple = """こんにちは、世界。\nこんばんは、世界。""" _lowerCAmelCase : List[str] = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) _lowerCAmelCase : int = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(UpperCAmelCase_ , """wb""" ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , """rb""" ) as handle: _lowerCAmelCase : Union[str, Any] = pickle.load(UpperCAmelCase_ ) _lowerCAmelCase : Optional[int] = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_sudachi def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' _lowerCAmelCase : List[Any] = SudachiTokenizer(sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, 
"""発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: '''simple docstring''' _lowerCAmelCase : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] ) @require_sudachi def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: '''simple docstring''' _lowerCAmelCase : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] ) @require_sudachi def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: '''simple docstring''' _lowerCAmelCase : Tuple = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" ) self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] ) @require_sudachi def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: '''simple docstring''' _lowerCAmelCase : Optional[int] = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , ) @require_sudachi def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: '''simple docstring''' _lowerCAmelCase : Optional[Any] = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , ) @require_sudachi def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: 
'''simple docstring''' _lowerCAmelCase : int = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type="""core""" ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: '''simple docstring''' _lowerCAmelCase : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" ) self.assertIsNotNone(UpperCAmelCase_ ) _lowerCAmelCase : List[str] = """こんにちは、世界。\nこんばんは、世界。""" _lowerCAmelCase : str = tokenizer.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] ) _lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , """tokenizer.bin""" ) with open(UpperCAmelCase_ , """wb""" ) as handle: pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ ) with open(UpperCAmelCase_ , """rb""" ) as handle: _lowerCAmelCase : Tuple = pickle.load(UpperCAmelCase_ ) _lowerCAmelCase : Dict = tokenizer_new.tokenize(UpperCAmelCase_ ) self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: '''simple docstring''' _lowerCAmelCase : List[Any] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' _lowerCAmelCase : Tuple = JumanppTokenizer(do_lower_case=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" 
\tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' _lowerCAmelCase : List[str] = JumanppTokenizer(normalize_text=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: '''simple docstring''' _lowerCAmelCase : int = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , ) @require_jumanpp def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' _lowerCAmelCase : Union[str, Any] = JumanppTokenizer() self.assertListEqual( tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: '''simple docstring''' _lowerCAmelCase : List[str] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""] _lowerCAmelCase : List[str] = {} for i, token in enumerate(UpperCAmelCase_ ): _lowerCAmelCase : Optional[int] = i _lowerCAmelCase : Tuple = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] ) 
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] ) self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] ) def SCREAMING_SNAKE_CASE ( self : int ) -> str: '''simple docstring''' _lowerCAmelCase : int = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" ) _lowerCAmelCase : Tuple = tokenizer.subword_tokenizer _lowerCAmelCase : Optional[Any] = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" ) self.assertListEqual(UpperCAmelCase_ , ["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] ) _lowerCAmelCase : Optional[int] = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" ) self.assertListEqual(UpperCAmelCase_ , ["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] ) def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: '''simple docstring''' _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" ) _lowerCAmelCase : Optional[int] = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCAmelCase_ ) _lowerCAmelCase : Optional[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCAmelCase_ ) _lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) _lowerCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __snake_case (__UpperCAmelCase , unittest.TestCase ): lowerCAmelCase__ = BertJapaneseTokenizer lowerCAmelCase__ = False def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: '''simple docstring''' super().setUp() _lowerCAmelCase : Union[str, Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", 
"""に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""] _lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def SCREAMING_SNAKE_CASE ( self : Any , **_UpperCAmelCase : Optional[int] ) -> List[Any]: '''simple docstring''' return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="""character""" , **UpperCAmelCase_ ) def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : List[Any] ) -> List[Any]: '''simple docstring''' _lowerCAmelCase : Optional[int] = """こんにちは、世界。 \nこんばんは、世界。""" _lowerCAmelCase : Dict = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。""" return input_text, output_text def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: '''simple docstring''' pass # TODO add if relevant def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' _lowerCAmelCase : List[str] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="""character""" ) _lowerCAmelCase : Any = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" ) self.assertListEqual( UpperCAmelCase_ , ["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: '''simple docstring''' _lowerCAmelCase : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", 
"""、""", """。"""] _lowerCAmelCase : Optional[int] = {} for i, token in enumerate(UpperCAmelCase_ ): _lowerCAmelCase : Dict = i _lowerCAmelCase : Optional[int] = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token="""[UNK]""" ) self.assertListEqual(tokenizer.tokenize("""""" ) , [] ) self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] ) self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] ) def SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: '''simple docstring''' _lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" ) _lowerCAmelCase : Any = tokenizer.encode("""ありがとう。""" , add_special_tokens=UpperCAmelCase_ ) _lowerCAmelCase : Any = tokenizer.encode("""どういたしまして。""" , add_special_tokens=UpperCAmelCase_ ) _lowerCAmelCase : int = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ ) _lowerCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ ) # 2 is for "[CLS]", 3 is for "[SEP]" assert encoded_sentence == [2] + text + [3] assert encoded_pair == [2] + text + [3] + text_a + [3] @custom_tokenizers class __snake_case (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: '''simple docstring''' _lowerCAmelCase : str = """cl-tohoku/bert-base-japanese""" _lowerCAmelCase : int = AutoTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ ) class __snake_case (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: '''simple docstring''' _lowerCAmelCase : str = """cl-tohoku/bert-base-japanese""" with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm: BertTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called 
from.""" ) ) _lowerCAmelCase : Union[str, Any] = """bert-base-cased""" with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm: BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ ) self.assertTrue( cm.records[0].message.startswith( """The tokenizer class you load from this checkpoint is not the same type as the class this function""" """ is called from.""" ) )
711
from __future__ import annotations

from collections.abc import Iterator
from typing import Any


class Node:
    """A single node of a circular singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data          # payload stored in this node
        self.next: Node | None = None  # successor; in a circular list this is never None once linked


class CircularLinkedList:
    """Circular singly linked list with O(1) access to head and tail.

    Invariant: either both ``head`` and ``tail`` are None (empty list), or
    ``tail.next is head`` (the list closes on itself).
    """

    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        """Yield each node's data exactly once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:  # wrapped around: one full lap done
                break

    def __len__(self) -> int:
        """Number of nodes, computed by walking the list once."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """Render as ``a->b->c`` (empty string for an empty list)."""
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append ``data`` after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend ``data`` before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` so that it becomes the node at ``index``.

        Raises:
            IndexError: if ``index`` is not in ``0..len(self)`` (inclusive).
        """
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points to itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        """Remove and return the head's data."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the tail's data."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove the node at ``index`` and return its data.

        Raises:
            IndexError: if ``index`` is not in ``0..len(self)-1`` (so any
            deletion on an empty list raises).
        """
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        """True when the list holds no nodes."""
        return len(self) == 0


def test_circular_linked_list() -> None:
    """Exercise every public operation, including the error paths."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
196
0
"""Shared tester mixin for diffusers UNet down/mid/up blocks.

Subclasses are expected to define ``block_class`` and ``block_type``
("down", "mid" or "up"); this mixin provides dummy inputs and the common
forward/backward assertions.
"""
import unittest
from typing import Tuple

import torch

from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch


@require_torch
class UNetBlockTesterMixin:
    # NOTE: the mangled original defined every member under a single
    # duplicated name, so only the last definition survived and the
    # internal self-calls were unresolvable; distinct names are restored
    # here to match the call sites visible in the bodies.

    @property
    def dummy_input(self):
        """Default dummy input dict for the block under test."""
        return self.get_dummy_input()

    @property
    def output_shape(self):
        """Expected (batch, channels, h, w) of the block output per block_type."""
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        """Build a deterministic (seeded) input dict for the block's forward pass.

        Each ``include_*`` flag adds the corresponding optional keyword the
        various block types accept.
        """
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)  # fixed seed keeps slices reproducible
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        """Return (constructor kwargs, forward kwargs) for the block under test."""
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["in_channels"] = 32  # presumably adjusts up-block channels — TODO confirm against subclasses
        if self.block_type == "mid":
            init_dict.pop("out_channels")  # mid blocks take no out_channels

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        """Forward the block in eval mode and compare shape plus a corner slice."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):  # some blocks return (sample, ...) tuples
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        """Smoke-test a train-mode forward plus MSE backward pass."""
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
65
"""simple docstring""" from math import factorial def lowerCAmelCase ( __UpperCamelCase = 100 ): '''simple docstring''' return sum(int(__UpperCamelCase ) for x in str(factorial(__UpperCamelCase ) ) ) if __name__ == "__main__": print(solution(int(input('Enter the Number: ').strip())))
65
1
# Lazy-guarded public API for the UniDiffuser pipeline: when either
# `transformers` or `torch` is missing, re-export dummy placeholder objects
# (which raise a helpful ImportError on use) so importing this subpackage
# never hard-crashes a partial installation.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,  # NOTE(review): imported but unused in this block — presumably kept for re-export; confirm
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fallback: dummy objects stand in for the real pipeline classes.
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    # Real implementations (require both torch and transformers).
    # NOTE(review): `UTransformeraDModel` looks machine-mangled (likely
    # `UTransformer2DModel`) — confirm against modeling_uvit before relying on it.
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
712
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib UpperCamelCase_ = get_logger() UpperCamelCase_ = None class _SCREAMING_SNAKE_CASE ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ): def __init__(self , UpperCAmelCase=None , UpperCAmelCase=None , **UpperCAmelCase): '''simple docstring''' super().__init__(features=UpperCAmelCase) import jax from jaxlib.xla_client import Device if isinstance(UpperCAmelCase , UpperCAmelCase): raise ValueError( f"""Expected {device} to be a `str` not {type(UpperCAmelCase)}, as `jaxlib.xla_extension.Device` """ '''is not serializable neither with `pickle` nor with `dill`. Instead you can surround ''' '''the device with `str()` to get its string identifier that will be internally mapped ''' '''to the actual `jaxlib.xla_extension.Device`.''') __UpperCAmelCase =device if isinstance(UpperCAmelCase , UpperCAmelCase) else str(jax.devices()[0]) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __UpperCAmelCase =self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys()): logger.warning( f"""Device with string identifier {self.device} not listed among the available """ f"""devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default """ f"""device: {str(jax.devices()[0])}.""") __UpperCAmelCase =str(jax.devices()[0]) __UpperCAmelCase =jnp_array_kwargs @staticmethod def A__ (): '''simple docstring''' import jax return {str(UpperCAmelCase): device for device in jax.devices()} def A__ (self , UpperCAmelCase): '''simple docstring''' import jax import jax.numpy as jnp if 
isinstance(UpperCAmelCase , UpperCAmelCase) and column: if all( isinstance(UpperCAmelCase , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column): return jnp.stack(UpperCAmelCase , axis=0) return column def A__ (self , UpperCAmelCase): '''simple docstring''' import jax import jax.numpy as jnp if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase))): return value elif isinstance(UpperCAmelCase , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character): return value.tolist() __UpperCAmelCase ={} if isinstance(UpperCAmelCase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: __UpperCAmelCase ={'''dtype''': jnp.intaa} else: __UpperCAmelCase ={'''dtype''': jnp.intaa} elif isinstance(UpperCAmelCase , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating): __UpperCAmelCase ={'''dtype''': jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(UpperCAmelCase , PIL.Image.Image): __UpperCAmelCase =np.asarray(UpperCAmelCase) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: __UpperCAmelCase =self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device]): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs}) def A__ (self , UpperCAmelCase): '''simple docstring''' import jax # support for torch, tf, jax etc. 
if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(UpperCAmelCase , torch.Tensor): return self._tensorize(data_struct.detach().cpu().numpy()[()]) if hasattr(UpperCAmelCase , '''__array__''') and not isinstance(UpperCAmelCase , jax.Array): __UpperCAmelCase =data_struct.__array__() # support for nested types like struct of list of struct if isinstance(UpperCAmelCase , np.ndarray): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(UpperCAmelCase) for substruct in data_struct]) elif isinstance(UpperCAmelCase , (list, tuple)): return self._consolidate([self.recursive_tensorize(UpperCAmelCase) for substruct in data_struct]) return self._tensorize(UpperCAmelCase) def A__ (self , UpperCAmelCase): '''simple docstring''' return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase) def A__ (self , UpperCAmelCase): '''simple docstring''' __UpperCAmelCase =self.numpy_arrow_extractor().extract_row(UpperCAmelCase) __UpperCAmelCase =self.python_features_decoder.decode_row(UpperCAmelCase) return self.recursive_tensorize(UpperCAmelCase) def A__ (self , UpperCAmelCase): '''simple docstring''' __UpperCAmelCase =self.numpy_arrow_extractor().extract_column(UpperCAmelCase) __UpperCAmelCase =self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0]) __UpperCAmelCase =self.recursive_tensorize(UpperCAmelCase) __UpperCAmelCase =self._consolidate(UpperCAmelCase) return column def A__ (self , UpperCAmelCase): '''simple docstring''' __UpperCAmelCase =self.numpy_arrow_extractor().extract_batch(UpperCAmelCase) __UpperCAmelCase =self.python_features_decoder.decode_batch(UpperCAmelCase) __UpperCAmelCase =self.recursive_tensorize(UpperCAmelCase) for column_name in batch: __UpperCAmelCase =self._consolidate(batch[column_name]) return batch
142
0
from __future__ import annotations from collections import deque class A__ : '''simple docstring''' def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : list[str] ): """simple docstring""" UpperCamelCase = [] self.adlist.append( {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} ) for keyword in keywords: self.add_keyword(_SCREAMING_SNAKE_CASE ) self.set_fail_transitions() def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" UpperCamelCase = 0 for character in keyword: UpperCamelCase = self.find_next_state(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if next_state is None: self.adlist.append( { 'value': character, 'next_states': [], 'fail_state': 0, 'output': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) UpperCamelCase = len(self.adlist ) - 1 else: UpperCamelCase = next_state self.adlist[current_state]["output"].append(_SCREAMING_SNAKE_CASE ) def _SCREAMING_SNAKE_CASE ( self : Any ): """simple docstring""" UpperCamelCase = deque() for node in self.adlist[0]["next_states"]: q.append(_SCREAMING_SNAKE_CASE ) UpperCamelCase = 0 while q: UpperCamelCase = q.popleft() for child in self.adlist[r]["next_states"]: q.append(_SCREAMING_SNAKE_CASE ) UpperCamelCase = self.adlist[r]['fail_state'] while ( self.find_next_state(_SCREAMING_SNAKE_CASE , self.adlist[child]['value'] ) is None and state != 0 ): UpperCamelCase = self.adlist[state]['fail_state'] UpperCamelCase = self.find_next_state( _SCREAMING_SNAKE_CASE , self.adlist[child]['value'] ) if self.adlist[child]["fail_state"] is None: UpperCamelCase = 0 UpperCamelCase = ( self.adlist[child]['output'] + self.adlist[self.adlist[child]['fail_state']]['output'] ) def 
_SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : str ): """simple docstring""" UpperCamelCase = {} # returns a dict with keywords and list of its occurrences UpperCamelCase = 0 for i in range(len(_SCREAMING_SNAKE_CASE ) ): while ( self.find_next_state(_SCREAMING_SNAKE_CASE , string[i] ) is None and current_state != 0 ): UpperCamelCase = self.adlist[current_state]['fail_state'] UpperCamelCase = self.find_next_state(_SCREAMING_SNAKE_CASE , string[i] ) if next_state is None: UpperCamelCase = 0 else: UpperCamelCase = next_state for key in self.adlist[current_state]["output"]: if key not in result: UpperCamelCase = [] result[key].append(i - len(_SCREAMING_SNAKE_CASE ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
280
# Universal gas constant R in J / (mol * K).
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed of a gas molecule, v_rms = sqrt(3RT/M).

    Args:
        temperature: Absolute temperature in kelvin (must be >= 0).
        molar_mass: Molar mass in kg/mol (must be > 0).

    Returns:
        The RMS speed in m/s.

    Raises:
        ValueError: If temperature is negative or molar mass is not positive.
    """
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


# Backward-compatible alias for the previous (machine-mangled) public name.
lowercase__ = rms_speed_of_molecule


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
280
1
'''simple docstring''' from functools import lru_cache def _UpperCamelCase ( _a : Optional[int] ): """simple docstring""" __UpperCamelCase : Optional[Any] = 2 __UpperCamelCase : List[str] = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(_a ) if n > 1: factors.add(_a ) return factors @lru_cache def _UpperCamelCase ( _a : Optional[int] ): """simple docstring""" return len(unique_prime_factors(_a ) ) def _UpperCamelCase ( _a : List[str] ): """simple docstring""" return len(set(_a ) ) in (0, 1) def _UpperCamelCase ( _a : Tuple ): """simple docstring""" __UpperCamelCase : Dict = 2 while True: # Increment each value of a generated range __UpperCamelCase : str = [base + i for i in range(_a )] # Run elements through out unique_prime_factors function # Append our target number to the end. __UpperCamelCase : Any = [upf_len(_a ) for x in group] checker.append(_a ) # If all numbers in the list are equal, return the group variable. if equality(_a ): return group # Increment our base variable by 1 base += 1 def _UpperCamelCase ( _a : str = 4 ): """simple docstring""" __UpperCamelCase : Optional[int] = run(_a ) return results[0] if len(_a ) else None if __name__ == "__main__": print(solution())
709
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a= logging.get_logger(__name__) a= { '''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_config_file''': '''tokenizer_config.json''', } a= { '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''}, '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''}, '''tokenizer_config_file''': { '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json''' }, } a= {'''facebook/blenderbot-3B''': 1_2_8} class __lowercase ( _lowerCamelCase ): """simple docstring""" SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask'''] SCREAMING_SNAKE_CASE__ = BlenderbotTokenizer def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ): super().__init__( _lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , 
unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , ) __UpperCamelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _lowerCamelCase ) != add_prefix_space: __UpperCamelCase : Any = getattr(_lowerCamelCase , pre_tok_state.pop('type' ) ) __UpperCamelCase : Dict = add_prefix_space __UpperCamelCase : Optional[Any] = pre_tok_class(**_lowerCamelCase ) __UpperCamelCase : str = add_prefix_space __UpperCamelCase : Optional[int] = 'post_processor' __UpperCamelCase : Tuple = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) if tokenizer_component_instance: __UpperCamelCase : str = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: __UpperCamelCase : List[Any] = tuple(state['sep'] ) if "cls" in state: __UpperCamelCase : str = tuple(state['cls'] ) __UpperCamelCase : Tuple = False if state.get('add_prefix_space' , _lowerCamelCase ) != add_prefix_space: __UpperCamelCase : Dict = add_prefix_space __UpperCamelCase : str = True if state.get('trim_offsets' , _lowerCamelCase ) != trim_offsets: __UpperCamelCase : int = trim_offsets __UpperCamelCase : Any = True if changes_to_apply: __UpperCamelCase : Dict = getattr(_lowerCamelCase , state.pop('type' ) ) __UpperCamelCase : Any = component_class(**_lowerCamelCase ) setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase ) @property # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def lowerCAmelCase ( self ): if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' 
) return None return str(self._mask_token ) @mask_token.setter def lowerCAmelCase ( self , _lowerCamelCase ): __UpperCamelCase : int = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else value __UpperCamelCase : Optional[Any] = value def lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ): __UpperCamelCase : Dict = kwargs.get('is_split_into_words' , _lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ): __UpperCamelCase : Dict = kwargs.get('is_split_into_words' , _lowerCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase ) def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ): __UpperCamelCase : List[str] = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase ) def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ): __UpperCamelCase : Union[str, Any] = [self.sep_token_id] __UpperCamelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ): return token_ids_a + [self.eos_token_id] def lowerCAmelCase ( self , _lowerCamelCase ): __UpperCamelCase : List[str] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(_lowerCamelCase ) __UpperCamelCase : Union[str, Any] = ' '.join(_lowerCamelCase ) __UpperCamelCase : Union[str, Any] = self.encode(_lowerCamelCase ) if len(_lowerCamelCase ) > self.model_max_length: __UpperCamelCase : Tuple = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
287
0
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. a_ : Optional[int] = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. a_ : List[str] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. a_ : Tuple = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ): lowerCamelCase_ = len([g for position, g in enumerate(UpperCAmelCase_ ) if g == main_target[position]] ) return (item, float(UpperCAmelCase_ )) def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ): lowerCamelCase_ = random.randint(0 , len(UpperCAmelCase_ ) - 1 ) lowerCamelCase_ = parent_a[:random_slice] + parent_a[random_slice:] lowerCamelCase_ = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] ): lowerCamelCase_ = list(UpperCAmelCase_ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: lowerCamelCase_ = random.choice(UpperCAmelCase_ ) return "".join(UpperCAmelCase_ ) def __snake_case ( UpperCAmelCase_ : tuple[str, float] , UpperCAmelCase_ : list[tuple[str, float]] , UpperCAmelCase_ : list[str] , ): lowerCamelCase_ = [] # Generate more children proportionally to the fitness score. lowerCamelCase_ = int(parent_a[1] * 100 ) + 1 lowerCamelCase_ = 10 if child_n >= 10 else child_n for _ in range(UpperCAmelCase_ ): lowerCamelCase_ = population_score[random.randint(0 , UpperCAmelCase_ )][0] lowerCamelCase_ ,lowerCamelCase_ = crossover(parent_a[0] , UpperCAmelCase_ ) # Append new string to the population list. 
pop.append(mutate(UpperCAmelCase_ , UpperCAmelCase_ ) ) pop.append(mutate(UpperCAmelCase_ , UpperCAmelCase_ ) ) return pop def __snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] , UpperCAmelCase_ : bool = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: lowerCamelCase_ = F'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(UpperCAmelCase_ ) # Verify that the target contains no genes besides the ones inside genes variable. lowerCamelCase_ = sorted({c for c in target if c not in genes} ) if not_in_genes_list: lowerCamelCase_ = F'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(UpperCAmelCase_ ) # Generate random starting population. lowerCamelCase_ = [] for _ in range(UpperCAmelCase_ ): population.append("".join([random.choice(UpperCAmelCase_ ) for i in range(len(UpperCAmelCase_ ) )] ) ) # Just some logs to know what the algorithms is doing. lowerCamelCase_ ,lowerCamelCase_ = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(UpperCAmelCase_ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. lowerCamelCase_ = [evaluate(UpperCAmelCase_ , UpperCAmelCase_ ) for item in population] # Check if there is a matching evolution. 
lowerCamelCase_ = sorted(UpperCAmelCase_ , key=lambda UpperCAmelCase_ : x[1] , reverse=UpperCAmelCase_ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F'''\nGeneration: {generation}''' F'''\nTotal Population:{total_population}''' F'''\nBest score: {population_score[0][1]}''' F'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. lowerCamelCase_ = population[: int(N_POPULATION / 3 )] population.clear() population.extend(UpperCAmelCase_ ) # Normalize population score to be between 0 and 1. lowerCamelCase_ = [ (item, score / len(UpperCAmelCase_ )) for item, score in population_score ] # This is selection for i in range(UpperCAmelCase_ ): population.extend(select(population_score[int(UpperCAmelCase_ )] , UpperCAmelCase_ , UpperCAmelCase_ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(UpperCAmelCase_ ) > N_POPULATION: break if __name__ == "__main__": a_ : Optional[Any] = ( """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!""" ) a_ : Optional[Any] = list( """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm""" """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\""" ) a_ , a_ , a_ : Optional[Any] = basic(target_str, genes_list) print( f'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
675
'''simple docstring''' # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version a_ : List[Any] = get_logger(__name__) class snake_case : """simple docstring""" _lowerCamelCase = "dummy_data" _lowerCamelCase = "datasets" _lowerCamelCase = False def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = True , UpperCamelCase = None , ): """simple docstring""" lowerCamelCase_ = 0 lowerCamelCase_ = dataset_name lowerCamelCase_ = cache_dir lowerCamelCase_ = use_local_dummy_data lowerCamelCase_ = config # download_callbacks take a single url as input lowerCamelCase_ = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root lowerCamelCase_ = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general lowerCamelCase_ = str(UpperCamelCase ) # to be downloaded lowerCamelCase_ = None lowerCamelCase_ = None @property def snake_case ( self ): """simple docstring""" if self._dummy_file is None: lowerCamelCase_ = self.download_dummy_data() return self._dummy_file @property def snake_case ( self ): """simple docstring""" if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def snake_case ( self ): """simple docstring""" return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def snake_case ( self ): """simple docstring""" lowerCamelCase_ = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) lowerCamelCase_ 
= cached_path( UpperCamelCase , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase , force_extract=UpperCamelCase ) return os.path.join(UpperCamelCase , self.dummy_file_name ) @property def snake_case ( self ): """simple docstring""" return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def snake_case ( self ): """simple docstring""" if self._bucket_url is None: lowerCamelCase_ = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def snake_case ( self ): """simple docstring""" # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def snake_case ( self , UpperCamelCase , *UpperCamelCase ): """simple docstring""" if self.load_existing_dummy_data: # dummy data is downloaded and tested lowerCamelCase_ = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned lowerCamelCase_ = self.dummy_file_name # special case when data_url is a dict if isinstance(UpperCamelCase , UpperCamelCase ): return self.create_dummy_data_dict(UpperCamelCase , UpperCamelCase ) elif isinstance(UpperCamelCase , (list, tuple) ): return self.create_dummy_data_list(UpperCamelCase , UpperCamelCase ) else: return self.create_dummy_data_single(UpperCamelCase , UpperCamelCase ) def snake_case ( self , UpperCamelCase , *UpperCamelCase ): """simple docstring""" return self.download_and_extract(UpperCamelCase ) def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" return self.download_and_extract(UpperCamelCase ) def snake_case ( self , UpperCamelCase , *UpperCamelCase , **UpperCamelCase ): """simple docstring""" return path def snake_case ( self ): """simple docstring""" return {} def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple 
docstring""" lowerCamelCase_ = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(UpperCamelCase , UpperCamelCase ): for single_url in single_urls: download_callback(UpperCamelCase ) else: lowerCamelCase_ = single_urls download_callback(UpperCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = [os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) for x in single_urls] else: lowerCamelCase_ = single_urls lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(Path(UpperCamelCase ).name ) ) lowerCamelCase_ = value # make sure that values are unique if all(isinstance(UpperCamelCase , UpperCamelCase ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique lowerCamelCase_ = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one lowerCamelCase_ = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , UpperCamelCase ) ) for url in data_url ) lowerCamelCase_ = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): lowerCamelCase_ = [data_url[0]] * len(UpperCamelCase ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(UpperCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowerCamelCase_ = 
os.path.join(UpperCamelCase , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(UpperCamelCase ) return dummy_data_list def snake_case ( self , UpperCamelCase , UpperCamelCase ): """simple docstring""" for download_callback in self.download_callbacks: download_callback(UpperCamelCase ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus lowerCamelCase_ = os.path.join(UpperCamelCase , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(UpperCamelCase ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def snake_case ( self ): """simple docstring""" pass def snake_case ( self ): """simple docstring""" pass def snake_case ( self , UpperCamelCase ): """simple docstring""" def _iter_archive_members(UpperCamelCase ): # this preserves the order of the members inside the ZIP archive lowerCamelCase_ = Path(self.dummy_file ).parent lowerCamelCase_ = path.relative_to(UpperCamelCase ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: lowerCamelCase_ = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(UpperCamelCase ) lowerCamelCase_ = Path(UpperCamelCase ) lowerCamelCase_ = _iter_archive_members(UpperCamelCase ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(UpperCamelCase ).as_posix(), file_path.open("rb" ) def snake_case ( self , UpperCamelCase ): """simple docstring""" if not 
isinstance(UpperCamelCase , UpperCamelCase ): lowerCamelCase_ = [paths] for path in paths: if os.path.isfile(UpperCamelCase ): if os.path.basename(UpperCamelCase ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(UpperCamelCase ): if os.path.basename(UpperCamelCase ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(UpperCamelCase ): if filename.startswith((".", "__") ): continue yield os.path.join(UpperCamelCase , UpperCamelCase )
675
1
"""simple docstring""" from __future__ import annotations def __magic_name__ ( lowercase , lowercase , lowercase , ): if (stress, tangential_force, area).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif stress < 0: raise ValueError("""Stress cannot be negative""" ) elif tangential_force < 0: raise ValueError("""Tangential Force cannot be negative""" ) elif area < 0: raise ValueError("""Area cannot be negative""" ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
721
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig _UpperCAmelCase = { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""", } class a ( UpperCAmelCase__ ): UpperCamelCase : Any = 'albert' def __init__( self : Dict , lowerCAmelCase : List[str]=3_0000 , lowerCAmelCase : List[Any]=128 , lowerCAmelCase : List[str]=4096 , lowerCAmelCase : str=12 , lowerCAmelCase : str=1 , lowerCAmelCase : Tuple=64 , lowerCAmelCase : Dict=1_6384 , lowerCAmelCase : int=1 , lowerCAmelCase : str="gelu_new" , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[Any]=0 , lowerCAmelCase : str=512 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[Any]=0.0_2 , lowerCAmelCase : Union[str, Any]=1E-12 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : List[Any]="absolute" , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : int=2 , lowerCAmelCase : Optional[int]=3 , **lowerCAmelCase : int , ) -> Tuple: '''simple docstring''' super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase ) SCREAMING_SNAKE_CASE_: List[str] =vocab_size SCREAMING_SNAKE_CASE_: Optional[int] =embedding_size SCREAMING_SNAKE_CASE_: Optional[int] 
=hidden_size SCREAMING_SNAKE_CASE_: Tuple =num_hidden_layers SCREAMING_SNAKE_CASE_: Any =num_hidden_groups SCREAMING_SNAKE_CASE_: List[Any] =num_attention_heads SCREAMING_SNAKE_CASE_: List[Any] =inner_group_num SCREAMING_SNAKE_CASE_: Optional[int] =hidden_act SCREAMING_SNAKE_CASE_: int =intermediate_size SCREAMING_SNAKE_CASE_: Any =hidden_dropout_prob SCREAMING_SNAKE_CASE_: Union[str, Any] =attention_probs_dropout_prob SCREAMING_SNAKE_CASE_: int =max_position_embeddings SCREAMING_SNAKE_CASE_: Any =type_vocab_size SCREAMING_SNAKE_CASE_: int =initializer_range SCREAMING_SNAKE_CASE_: List[Any] =layer_norm_eps SCREAMING_SNAKE_CASE_: Dict =classifier_dropout_prob SCREAMING_SNAKE_CASE_: int =position_embedding_type class a ( UpperCAmelCase__ ): @property def lowerCamelCase__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": SCREAMING_SNAKE_CASE_: str ={0: """batch""", 1: """choice""", 2: """sequence"""} else: SCREAMING_SNAKE_CASE_: Dict ={0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
36
0
from __future__ import annotations snake_case__ : List[str] = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0] snake_case__ : Dict = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1] def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = [] __lowercase = len(_SCREAMING_SNAKE_CASE ) for i in range(_SCREAMING_SNAKE_CASE ): __lowercase = -1 for j in range(i + 1 , _SCREAMING_SNAKE_CASE ): if arr[i] < arr[j]: __lowercase = arr[j] break result.append(_SCREAMING_SNAKE_CASE ) return result def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = [] for i, outer in enumerate(_SCREAMING_SNAKE_CASE ): __lowercase = -1 for inner in arr[i + 1 :]: if outer < inner: __lowercase = inner break result.append(_SCREAMING_SNAKE_CASE ) return result def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = len(_SCREAMING_SNAKE_CASE ) __lowercase = [] __lowercase = [-1] * arr_size for index in reversed(range(_SCREAMING_SNAKE_CASE ) ): if stack: while stack[-1] <= arr[index]: stack.pop() if not stack: break if stack: __lowercase = stack[-1] stack.append(arr[index] ) return result if __name__ == "__main__": from doctest import testmod from timeit import timeit testmod() print(next_greatest_element_slow(arr)) print(next_greatest_element_fast(arr)) print(next_greatest_element(arr)) snake_case__ : str = ( """from __main__ import arr, next_greatest_element_slow, """ """next_greatest_element_fast, next_greatest_element""" ) print( """next_greatest_element_slow():""", timeit("""next_greatest_element_slow(arr)""", setup=setup), ) print( """next_greatest_element_fast():""", timeit("""next_greatest_element_fast(arr)""", setup=setup), ) print( """ next_greatest_element():""", timeit("""next_greatest_element(arr)""", setup=setup), )
402
def snake_case_ ( _SCREAMING_SNAKE_CASE ): __lowercase = [] __lowercase = set({"(", "[", "{"} ) __lowercase = set({")", "]", "}"} ) __lowercase = {"{": "}", "[": "]", "(": ")"} for i in range(len(_SCREAMING_SNAKE_CASE ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(_SCREAMING_SNAKE_CASE ) == 0 or (len(_SCREAMING_SNAKE_CASE ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(_SCREAMING_SNAKE_CASE ) == 0 def snake_case_ ( ): __lowercase = input("Enter sequence of brackets: " ) if is_balanced(_SCREAMING_SNAKE_CASE ): print(_SCREAMING_SNAKE_CASE , "is balanced" ) else: print(_SCREAMING_SNAKE_CASE , "is not balanced" ) if __name__ == "__main__": main()
402
1
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __UpperCAmelCase( A__ ): """simple docstring""" __magic_name__ = ["""image_processor""", """tokenizer"""] __magic_name__ = """CLIPImageProcessor""" __magic_name__ = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self , __magic_name__=None , __magic_name__=None , **__magic_name__ ): """simple docstring""" A_ : Optional[Any] = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , __magic_name__ , ) A_ : Union[str, Any] = kwargs.pop('''feature_extractor''' ) A_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(__magic_name__ , __magic_name__ ) def __call__( self , __magic_name__=None , __magic_name__=None , __magic_name__=None , **__magic_name__ ): """simple docstring""" if text is None and images is None: raise ValueError('''You have to specify either text or images. 
Both cannot be none.''' ) if text is not None: A_ : Tuple = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) if images is not None: A_ : Tuple = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ ) if text is not None and images is not None: A_ : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ ) def UpperCAmelCase ( self , *__magic_name__ , **__magic_name__ ): """simple docstring""" return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ ) def UpperCAmelCase ( self , *__magic_name__ , **__magic_name__ ): """simple docstring""" return self.tokenizer.decode(*__magic_name__ , **__magic_name__ ) @property def UpperCAmelCase ( self ): """simple docstring""" A_ : Optional[int] = self.tokenizer.model_input_names A_ : Dict = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def UpperCAmelCase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __magic_name__ , ) return self.image_processor_class @property def UpperCAmelCase ( self ): """simple docstring""" warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __magic_name__ , ) return self.image_processor
710
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() _lowerCAmelCase = logging.get_logger(__name__) def a__ ( a , a , a ) -> Any: A_ : List[Any] = WavaVecaForSequenceClassification.from_pretrained(a , config=a ) A_ : str = downstream_dict['''projector.weight'''] A_ : Dict = downstream_dict['''projector.bias'''] A_ : str = downstream_dict['''model.post_net.linear.weight'''] A_ : Optional[Any] = downstream_dict['''model.post_net.linear.bias'''] return model def a__ ( a , a , a ) -> Optional[int]: A_ : List[str] = WavaVecaForAudioFrameClassification.from_pretrained(a , config=a ) A_ : Any = downstream_dict['''model.linear.weight'''] A_ : str = downstream_dict['''model.linear.bias'''] return model def a__ ( a , a , a ) -> Optional[int]: A_ : Union[str, Any] = WavaVecaForXVector.from_pretrained(a , config=a ) A_ : Any = downstream_dict['''connector.weight'''] A_ : Dict = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): A_ : Dict = downstream_dict[ f"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] A_ : List[str] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] A_ : Optional[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] A_ : str = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] A_ : Tuple = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] A_ : List[Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] A_ : Union[str, Any] = downstream_dict['''objective.W'''] return model @torch.no_grad() def a__ ( a , a , a , a ) -> str: A_ : List[Any] = torch.load(a , map_location='''cpu''' ) A_ : int = checkpoint['''Downstream'''] A_ : Union[str, Any] = WavaVecaConfig.from_pretrained(a ) A_ : 
Optional[int] = WavaVecaFeatureExtractor.from_pretrained( a , return_attention_mask=a , do_normalize=a ) A_ : List[Any] = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): A_ : str = convert_classification(a , a , a ) elif arch.endswith('''ForAudioFrameClassification''' ): A_ : Tuple = convert_diarization(a , a , a ) elif arch.endswith('''ForXVector''' ): A_ : Dict = convert_xvector(a , a , a ) else: raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: A_ : List[Any] = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(a ) hf_model.save_pretrained(a ) if __name__ == "__main__": _lowerCAmelCase = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') _lowerCAmelCase = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
236
0
'''simple docstring''' import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] ): '''simple docstring''' if "img_encoder.pos_embed" in name: _lowerCAmelCase = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" ) if "img_encoder.patch_embed.proj" in name: _lowerCAmelCase = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" ) if "img_encoder.patch_embed.norm" in name: _lowerCAmelCase = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" ) if "img_encoder.layers" in name: _lowerCAmelCase = name.replace("img_encoder.layers" , "vision_model.encoder.stages" ) if "blocks" in name and "res" not in name: _lowerCAmelCase = name.replace("blocks" , "layers" ) if "attn" in name and "pre_assign" not in name: _lowerCAmelCase = name.replace("attn" , "self_attn" ) if "proj" in name and "self_attn" in name and "text" not in name: _lowerCAmelCase = name.replace("proj" , "out_proj" ) if "pre_assign_attn.attn.proj" in name: _lowerCAmelCase = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" ) if "norm1" in name: _lowerCAmelCase = name.replace("norm1" , "layer_norm1" ) if "norm2" in name and "pre_assign" not in name: _lowerCAmelCase = name.replace("norm2" , "layer_norm2" ) if "img_encoder.norm" in name: _lowerCAmelCase = name.replace("img_encoder.norm" , "vision_model.layernorm" ) # text encoder if "text_encoder.token_embedding" in name: _lowerCAmelCase = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" ) if "text_encoder.positional_embedding" in name: _lowerCAmelCase = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" ) if "text_encoder.transformer.resblocks." in name: _lowerCAmelCase = name.replace("text_encoder.transformer.resblocks." 
, "text_model.encoder.layers." ) if "ln_1" in name: _lowerCAmelCase = name.replace("ln_1" , "layer_norm1" ) if "ln_2" in name: _lowerCAmelCase = name.replace("ln_2" , "layer_norm2" ) if "c_fc" in name: _lowerCAmelCase = name.replace("c_fc" , "fc1" ) if "c_proj" in name: _lowerCAmelCase = name.replace("c_proj" , "fc2" ) if "text_encoder" in name: _lowerCAmelCase = name.replace("text_encoder" , "text_model" ) if "ln_final" in name: _lowerCAmelCase = name.replace("ln_final" , "final_layer_norm" ) # projection layers if "img_projector.linear_hidden." in name: _lowerCAmelCase = name.replace("img_projector.linear_hidden." , "visual_projection." ) if "img_projector.linear_out." in name: _lowerCAmelCase = name.replace("img_projector.linear_out." , "visual_projection.3." ) if "text_projector.linear_hidden" in name: _lowerCAmelCase = name.replace("text_projector.linear_hidden" , "text_projection" ) if "text_projector.linear_out" in name: _lowerCAmelCase = name.replace("text_projector.linear_out" , "text_projection.3" ) return name def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int ): '''simple docstring''' for key in orig_state_dict.copy().keys(): _lowerCAmelCase = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _lowerCAmelCase = key.split("." 
) _lowerCAmelCase , _lowerCAmelCase = int(key_split[2] ), int(key_split[4] ) _lowerCAmelCase = config.vision_config.hidden_size if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[dim : dim * 2, :] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[dim : dim * 2] _lowerCAmelCase = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors _lowerCAmelCase = key.split("." ) _lowerCAmelCase = int(key_split[3] ) _lowerCAmelCase = config.text_config.hidden_size if "weight" in key: _lowerCAmelCase = val[:dim, :] _lowerCAmelCase = val[ dim : dim * 2, : ] _lowerCAmelCase = val[-dim:, :] else: _lowerCAmelCase = val[:dim] _lowerCAmelCase = val[dim : dim * 2] _lowerCAmelCase = val[-dim:] else: _lowerCAmelCase = rename_key(SCREAMING_SNAKE_CASE_ ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): _lowerCAmelCase = val.squeeze_() else: _lowerCAmelCase = val return orig_state_dict def __a(): '''simple docstring''' _lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg" _lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw ) return im @torch.no_grad() def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int]="groupvit-gcc-yfcc" , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ): '''simple docstring''' _lowerCAmelCase = GroupViTConfig() _lowerCAmelCase = GroupViTModel(SCREAMING_SNAKE_CASE_ ).eval() _lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model"] _lowerCAmelCase = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) _lowerCAmelCase , _lowerCAmelCase = 
model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(SCREAMING_SNAKE_CASE_ ) == 0) # verify result _lowerCAmelCase = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" ) _lowerCAmelCase = prepare_img() _lowerCAmelCase = processor(text=["a photo of a cat", "a photo of a dog"] , images=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_tensors="pt" ) with torch.no_grad(): _lowerCAmelCase = model(**SCREAMING_SNAKE_CASE_ ) if model_name == "groupvit-gcc-yfcc": _lowerCAmelCase = torch.tensor([[13.3523, 6.3629]] ) elif model_name == "groupvit-gcc-redcaps": _lowerCAmelCase = torch.tensor([[16.1873, 8.6230]] ) else: raise ValueError(F'''Model name {model_name} not supported.''' ) assert torch.allclose(outputs.logits_per_image , SCREAMING_SNAKE_CASE_ , atol=1e-3 ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) print("Successfully saved processor and model to" , SCREAMING_SNAKE_CASE_ ) if push_to_hub: print("Pushing to the hub..." ) processor.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="nielsr" ) model.push_to_hub(SCREAMING_SNAKE_CASE_ , organization="nielsr" ) if __name__ == "__main__": _SCREAMING_SNAKE_CASE = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model." ) parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint") parser.add_argument( "--model_name", default="groupvit-gccy-fcc", type=str, help="Name of the model. 
Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'", ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.", ) _SCREAMING_SNAKE_CASE = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
18
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, logging from .tokenization_realm import RealmTokenizer lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowerCamelCase_ = { 'vocab_file': { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt', }, 'tokenizer_file': { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json' ), 'google/realm-cc-news-pretrained-openqa': ( 
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json' ), 'google/realm-orqa-nq-openqa': ( 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json' ), 'google/realm-orqa-nq-reader': ( 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json' ), 'google/realm-orqa-wq-openqa': ( 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json' ), 'google/realm-orqa-wq-reader': ( 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json' ), }, } lowerCamelCase_ = { 'google/realm-cc-news-pretrained-embedder': 5_12, 'google/realm-cc-news-pretrained-encoder': 5_12, 'google/realm-cc-news-pretrained-scorer': 5_12, 'google/realm-cc-news-pretrained-openqa': 5_12, 'google/realm-orqa-nq-openqa': 5_12, 'google/realm-orqa-nq-reader': 5_12, 'google/realm-orqa-wq-openqa': 5_12, 'google/realm-orqa-wq-reader': 5_12, } lowerCamelCase_ = { 'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True}, 'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True}, 'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True}, 'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True}, 'google/realm-orqa-nq-openqa': {'do_lower_case': True}, 'google/realm-orqa-nq-reader': {'do_lower_case': True}, 'google/realm-orqa-wq-openqa': {'do_lower_case': True}, 'google/realm-orqa-wq-reader': {'do_lower_case': True}, } class lowercase_ ( A ): """simple docstring""" lowerCamelCase_ = VOCAB_FILES_NAMES lowerCamelCase_ = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ = PRETRAINED_INIT_CONFIGURATION lowerCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ = RealmTokenizer def __init__( self : List[str] , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Tuple=True , __lowerCamelCase : Union[str, Any]="[UNK]" , __lowerCamelCase : Any="[SEP]" , __lowerCamelCase : List[str]="[PAD]" , __lowerCamelCase : 
Optional[Any]="[CLS]" , __lowerCamelCase : List[str]="[MASK]" , __lowerCamelCase : Any=True , __lowerCamelCase : Tuple=None , **__lowerCamelCase : Dict , ): """simple docstring""" super().__init__( __lowerCamelCase , tokenizer_file=__lowerCamelCase , do_lower_case=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , pad_token=__lowerCamelCase , cls_token=__lowerCamelCase , mask_token=__lowerCamelCase , tokenize_chinese_chars=__lowerCamelCase , strip_accents=__lowerCamelCase , **__lowerCamelCase , ) _SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , __lowerCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , __lowerCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , __lowerCamelCase ) != tokenize_chinese_chars ): _SCREAMING_SNAKE_CASE = getattr(__lowerCamelCase , normalizer_state.pop("type" ) ) _SCREAMING_SNAKE_CASE = do_lower_case _SCREAMING_SNAKE_CASE = strip_accents _SCREAMING_SNAKE_CASE = tokenize_chinese_chars _SCREAMING_SNAKE_CASE = normalizer_class(**__lowerCamelCase ) _SCREAMING_SNAKE_CASE = do_lower_case def lowerCAmelCase_ ( self : Optional[int] , __lowerCamelCase : List[str] , **__lowerCamelCase : str ): """simple docstring""" _SCREAMING_SNAKE_CASE = PaddingStrategy.MAX_LENGTH _SCREAMING_SNAKE_CASE = text _SCREAMING_SNAKE_CASE = kwargs.pop("text_pair" , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = kwargs.pop("return_tensors" , __lowerCamelCase ) _SCREAMING_SNAKE_CASE = { "input_ids": [], "attention_mask": [], "token_type_ids": [], } for idx, candidate_text in enumerate(__lowerCamelCase ): if batch_text_pair is not None: _SCREAMING_SNAKE_CASE = batch_text_pair[idx] else: _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = super().__call__(__lowerCamelCase , __lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase ) _SCREAMING_SNAKE_CASE = encoded_candidates.get("input_ids" ) _SCREAMING_SNAKE_CASE = 
encoded_candidates.get("attention_mask" ) _SCREAMING_SNAKE_CASE = encoded_candidates.get("token_type_ids" ) if encoded_input_ids is not None: output_data["input_ids"].append(__lowerCamelCase ) if encoded_attention_mask is not None: output_data["attention_mask"].append(__lowerCamelCase ) if encoded_token_type_ids is not None: output_data["token_type_ids"].append(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = {key: item for key, item in output_data.items() if len(__lowerCamelCase ) != 0} return BatchEncoding(__lowerCamelCase , tensor_type=__lowerCamelCase ) def lowerCAmelCase_ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : str=None ): """simple docstring""" _SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : List[int] , __lowerCamelCase : Optional[List[int]] = None ): """simple docstring""" _SCREAMING_SNAKE_CASE = [self.sep_token_id] _SCREAMING_SNAKE_CASE = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCAmelCase_ ( self : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Optional[str] = None ): """simple docstring""" _SCREAMING_SNAKE_CASE = self._tokenizer.model.save(__lowerCamelCase , name=__lowerCamelCase ) return tuple(__lowerCamelCase )
418
0
import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def UpperCamelCase__ ( UpperCAmelCase ) -> List[Any]: """simple docstring""" def wrapper(*UpperCAmelCase , **UpperCAmelCase ): _a : str = timeit.default_timer() _a : Optional[int] = func(*UpperCAmelCase , **UpperCAmelCase ) _a : Any = timeit.default_timer() - starttime return delta _a : Optional[int] = func.__name__ return wrapper def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase=100 , UpperCAmelCase=None ) -> List[Any]: """simple docstring""" _a : Optional[Any] = [] _a : str = seq_shapes or {} for i in range(UpperCAmelCase ): _a : Union[str, Any] = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(UpperCAmelCase , _ArrayXD ): _a : Optional[int] = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(UpperCAmelCase , datasets.Value ): if v.dtype == "string": _a : int = '''The small grey turtle was surprisingly fast when challenged.''' else: _a : str = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(UpperCAmelCase , datasets.Sequence ): while isinstance(UpperCAmelCase , datasets.Sequence ): _a : Optional[Any] = v.feature _a : str = seq_shapes[k] _a : Any = np.random.rand(*UpperCAmelCase ).astype(v.dtype ) _a : int = data dummy_data.append((i, example) ) return dummy_data def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=100 , UpperCAmelCase=None ) -> List[Any]: """simple docstring""" _a : Union[str, Any] = generate_examples(UpperCAmelCase , num_examples=UpperCAmelCase , seq_shapes=UpperCAmelCase ) with ArrowWriter(features=UpperCAmelCase , path=UpperCAmelCase ) as writer: for key, record in dummy_data: _a : Dict = features.encode_example(UpperCAmelCase ) writer.write(UpperCAmelCase ) _a , _a : Optional[int] = writer.finalize() if not num_final_examples == num_examples: raise ValueError( F'Error writing the dataset, wrote {num_final_examples} examples 
but should have written {num_examples}.' ) _a : List[Any] = datasets.Dataset.from_file(filename=UpperCAmelCase , info=datasets.DatasetInfo(features=UpperCAmelCase ) ) return dataset
307
import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated __lowerCamelCase = collections.namedtuple('_Datasets', ['train', 'validation', 'test']) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ __lowerCamelCase = 'https://storage.googleapis.com/cvdf-datasets/mnist/' def UpperCamelCase__ ( UpperCAmelCase ) -> str: """simple docstring""" _a : Optional[int] = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCAmelCase )[0] @deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def UpperCamelCase__ ( UpperCAmelCase ) -> Optional[Any]: """simple docstring""" print('''Extracting''' , f.name ) with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream: _a : List[Any] = _readaa(UpperCAmelCase ) if magic != 2051: raise ValueError( '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) ) _a : Any = _readaa(UpperCAmelCase ) _a : Optional[int] = _readaa(UpperCAmelCase ) _a : Optional[int] = _readaa(UpperCAmelCase ) _a : Tuple = bytestream.read(rows * cols * num_images ) _a : int = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta ) _a : List[Any] = data.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 1 ) return data @deprecated(UpperCAmelCase , '''Please use tf.one_hot on tensors.''' ) def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase ) -> str: """simple docstring""" _a : List[Any] = labels_dense.shape[0] _a : List[str] = numpy.arange(UpperCAmelCase ) * num_classes _a : List[str] = numpy.zeros((num_labels, num_classes) ) _a : Dict = 1 return labels_one_hot @deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' ) def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=10 ) -> str: """simple docstring""" print('''Extracting''' , 
f.name ) with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream: _a : List[Any] = _readaa(UpperCAmelCase ) if magic != 2049: raise ValueError( '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) ) _a : str = _readaa(UpperCAmelCase ) _a : Dict = bytestream.read(UpperCAmelCase ) _a : List[str] = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(UpperCAmelCase , UpperCAmelCase ) return labels class UpperCamelCase_ : @deprecated( lowercase , '''Please use alternatives such as official/mnist/_DataSet.py''' ''' from tensorflow/models.''' , ) def __init__( self , lowercase , lowercase , lowercase=False , lowercase=False , lowercase=dtypes.floataa , lowercase=True , lowercase=None , ) -> Dict: _a , _a : int = random_seed.get_seed(lowercase ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) _a : str = dtypes.as_dtype(lowercase ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype ) if fake_data: _a : int = 10_000 _a : List[str] = one_hot else: assert ( images.shape[0] == labels.shape[0] ), F'images.shape: {images.shape} labels.shape: {labels.shape}' _a : Dict = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 _a : Union[str, Any] = images.reshape( images.shape[0] , images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. 
_a : List[str] = images.astype(numpy.floataa ) _a : Any = numpy.multiply(lowercase , 1.0 / 255.0 ) _a : Any = images _a : Tuple = labels _a : str = 0 _a : Dict = 0 @property def snake_case__( self ) -> Tuple: return self._images @property def snake_case__( self ) -> Optional[Any]: return self._labels @property def snake_case__( self ) -> Tuple: return self._num_examples @property def snake_case__( self ) -> Optional[Any]: return self._epochs_completed def snake_case__( self , lowercase , lowercase=False , lowercase=True ) -> int: if fake_data: _a : Optional[Any] = [1] * 784 _a : Optional[int] = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(lowercase )], [fake_label for _ in range(lowercase )], ) _a : Union[str, Any] = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: _a : Tuple = numpy.arange(self._num_examples ) numpy.random.shuffle(lowercase ) _a : Any = self.images[perma] _a : Union[str, Any] = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch _a : Any = self._num_examples - start _a : Any = self._images[start : self._num_examples] _a : Optional[Any] = self._labels[start : self._num_examples] # Shuffle the data if shuffle: _a : Tuple = numpy.arange(self._num_examples ) numpy.random.shuffle(lowercase ) _a : Union[str, Any] = self.images[perm] _a : str = self.labels[perm] # Start next epoch _a : List[Any] = 0 _a : Optional[int] = batch_size - rest_num_examples _a : int = self._index_in_epoch _a : str = self._images[start:end] _a : Union[str, Any] = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) , axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ), ) else: self._index_in_epoch += batch_size _a : Optional[int] = self._index_in_epoch return self._images[start:end], self._labels[start:end] 
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory` unless present.

    Args:
        filename: Local file name to store the download under.
        work_directory: Directory to place the file in (created if missing).
        source_url: Full URL to fetch the file from.

    Returns:
        The path of the (possibly pre-existing) local file.
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(
    None,
    "Please use alternatives such as: tensorflow_datasets.load('mnist')",
)
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Load the MNIST train/validation/test datasets.

    Args:
        train_dir: Directory where the gzipped MNIST files are cached.
        fake_data: If True, return empty fake datasets (for testing).
        one_hot: If True, labels are returned one-hot encoded.
        dtype: Image dtype; uint8 keeps [0, 255], float32 rescales to [0, 1].
        reshape: If True, flatten images to [num_examples, rows*cols].
        validation_size: Number of training examples held out for validation.
        seed: Seed for shuffling; None derives one from the graph-level seed.
        source_url: Base URL for the MNIST files; falls back to the default
            mirror when empty.

    Returns:
        A `_Datasets` namedtuple with `train`, `validation` and `test` fields.

    Raises:
        ValueError: If `validation_size` is outside [0, len(train_images)].
    """
    if fake_data:

        def fake():
            # Empty dataset flagged as fake; next_batch() synthesizes data.
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed
            )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    # Carve the validation split off the front of the training data.
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
307
1
"""Parallel odd-even transposition sort: one process per list element."""
from multiprocessing import Lock, Pipe, Process

# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(
    position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, list_length=10
):
    """Worker holding one element of the list during the sort.

    Args:
        position: Index of this worker's element in the original list.
        value: The element value this worker starts with.
        l_send / r_send: Pipes used to send our value to the left/right
            neighbour (None at the list boundaries).
        lr_cv / rr_cv: Pipes used to receive the left/right neighbour's value.
        result_pipe: Pipe through which the final value is reported.
        list_length: Number of swap phases; after `list_length` phases the
            list is guaranteed sorted (default 10 keeps the old behaviour).
    """
    global process_lock
    # We perform `list_length` swap phases: we *could* stop early if already
    # sorted, but detecting that costs as much as finishing the phases.
    for i in range(list_length):
        if (i + position) % 2 == 0 and r_send is not None:
            # even phase: exchange with the right neighbour
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            process_lock.acquire()
            neighbour = rr_cv[0].recv()
            process_lock.release()
            # the left side of a pair keeps the smaller value
            value = min(value, neighbour)
        elif (i + position) % 2 != 0 and l_send is not None:
            # odd phase: exchange with the left neighbour
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            process_lock.acquire()
            neighbour = lr_cv[0].recv()
            process_lock.release()
            # the right side of a pair keeps the larger value
            value = max(value, neighbour)
    # after all swaps are performed, report the final value to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort `arr` in ascending order using one process per element.

    Args:
        arr: List of comparable values (mutated in place).

    Returns:
        The same list, sorted ascending.
    """
    process_array_ = []
    # one result pipe per element, used to collect the final values
    result_pipe = [Pipe() for _ in arr]

    # The first and last workers have only one neighbour, so they are wired
    # outside of the loop.
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], len(arr)),
        )
    )
    # the pipe we send into is the next worker's left-receive, and vice versa
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(
                    i,
                    arr[i],
                    temp_ls,
                    temp_rs,
                    temp_lr,
                    temp_rr,
                    result_pipe[i],
                    len(arr),
                ),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
                len(arr),
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values back into the list
    for i in range(len(arr)):
        arr[i] = result_pipe[i][0].recv()
        process_array_[i].join()
    return arr


def main():
    """Demo: sort a reversed 10-element list and print before/after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
465
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE_ = { '''snap-research/efficientformer-l1-300''': ( '''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json''' ), } class lowerCAmelCase_ ( A__ ): '''simple docstring''' _snake_case = '''efficientformer''' def __init__( self , snake_case_ = [3, 2, 6, 4] , snake_case_ = [48, 96, 224, 448] , snake_case_ = [True, True, True, True] , snake_case_ = 448 , snake_case_ = 32 , snake_case_ = 4 , snake_case_ = 7 , snake_case_ = 5 , snake_case_ = 8 , snake_case_ = 4 , snake_case_ = 0.0 , snake_case_ = 16 , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = 3 , snake_case_ = 2 , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = 1 , snake_case_ = True , snake_case_ = True , snake_case_ = 1e-5 , snake_case_ = "gelu" , snake_case_ = 0.02 , snake_case_ = 1e-1_2 , snake_case_ = 224 , snake_case_ = 1e-0_5 , **snake_case_ , ) -> None: super().__init__(**snake_case_ ) __lowerCAmelCase = hidden_act __lowerCAmelCase = hidden_dropout_prob __lowerCAmelCase = hidden_sizes __lowerCAmelCase = num_hidden_layers __lowerCAmelCase = num_attention_heads __lowerCAmelCase = initializer_range __lowerCAmelCase = layer_norm_eps __lowerCAmelCase = patch_size __lowerCAmelCase = num_channels __lowerCAmelCase = depths __lowerCAmelCase = mlp_expansion_ratio __lowerCAmelCase = downsamples __lowerCAmelCase = dim __lowerCAmelCase = key_dim __lowerCAmelCase = attention_ratio __lowerCAmelCase = resolution __lowerCAmelCase = pool_size __lowerCAmelCase = downsample_patch_size __lowerCAmelCase = downsample_stride __lowerCAmelCase = downsample_pad __lowerCAmelCase = drop_path_rate __lowerCAmelCase = num_metaad_blocks __lowerCAmelCase = distillation __lowerCAmelCase = use_layer_scale __lowerCAmelCase = layer_scale_init_value __lowerCAmelCase = image_size __lowerCAmelCase = batch_norm_eps
465
1
import math def __lowerCamelCase ( __a :int ) -> bool: """simple docstring""" return math.sqrt(__snake_case ) * math.sqrt(__snake_case ) == num def __lowerCamelCase ( __a :int ) -> bool: """simple docstring""" A__ = 0 A__ = n while left <= right: A__ = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: A__ = mid - 1 else: A__ = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
713
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets A : Dict = datasets.logging.get_logger(__name__) A : Optional[Any] = '''\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = "Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric", author = "Moosavi, Nafise Sadat and Strube, Michael", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", month = aug, year = "2016", address = "Berlin, Germany", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/P16-1060", doi = "10.18653/v1/P16-1060", pages = "632--642", } ''' A : int = '''\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. 
All other rows are marked with a "-" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. ''' A : Union[str, Any] = ''' Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. 
keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting \'keep_singletons=False\', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs. min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. Returns: \'mentions\': mentions \'muc\': MUC metric [Vilain et al, 1995] \'bcub\': B-cubed [Bagga and Baldwin, 1998] \'ceafe\': CEAFe [Luo et al., 2005] \'lea\': LEA [Moosavi and Strube, 2016] \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric(\'coval\') >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\', ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\', ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\', ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\', ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\', ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {\'mentions/recall\': 1.0,[...] 
\'conll_score\': 100.0} ''' def __lowerCamelCase ( __a :Dict , __a :int , __a :int=False , __a :Optional[Any]=False , __a :int=True , __a :Optional[int]=False , __a :Dict="dummy_doc" ) -> Any: """simple docstring""" A__ = {doc: key_lines} A__ = {doc: sys_lines} A__ = {} A__ = 0 A__ = 0 A__ = 0 A__ = 0 A__ = 0 A__ = 0 A__ , A__ = reader.get_doc_mentions(__a , key_doc_lines[doc] , __a ) key_singletons_num += singletons_num if NP_only or min_span: A__ = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) A__ , A__ = reader.get_doc_mentions(__a , sys_doc_lines[doc] , __a ) sys_singletons_num += singletons_num if NP_only or min_span: A__ = reader.set_annotated_parse_trees(__a , key_doc_lines[doc] , __a , __a ) if remove_nested: A__ , A__ = reader.remove_nested_coref_mentions(__a , __a ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters A__ , A__ = reader.remove_nested_coref_mentions(__a , __a ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters A__ = reader.get_mention_assignments(__a , __a ) A__ = reader.get_mention_assignments(__a , __a ) A__ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( """Number of removed nested coreferring mentions in the key """ F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( """Number of resulting singleton clusters in the key """ F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' """files, respectively""" ) return doc_coref_infos def __lowerCamelCase ( __a :Any , __a :Union[str, Any] , __a :List[str] , __a :Dict , __a :str , __a :Tuple , __a :Union[str, Any] ) -> Optional[int]: """simple docstring""" A__ = get_coref_infos(__a , __a , __a , __a , __a , 
__a ) A__ = {} A__ = 0 A__ = 0 for name, metric in metrics: A__ , A__ , A__ = evaluator.evaluate_documents(__a , __a , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa} ) logger.info( name.ljust(1_0 ) , F'Recall: {recall * 1_0_0:.2f}' , F' Precision: {precision * 1_0_0:.2f}' , F' F1: {fa * 1_0_0:.2f}' , ) if conll_subparts_num == 3: A__ = (conll / 3) * 1_0_0 logger.info(F'CoNLL score: {conll:.2f}' ) output_scores.update({"""conll_score""": conll} ) return output_scores def __lowerCamelCase ( __a :int ) -> List[Any]: """simple docstring""" A__ = False for line in key_lines: if not line.startswith("""#""" ): if len(line.split() ) > 6: A__ = line.split()[5] if not parse_col == "-": A__ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A (datasets.Metric ): '''simple docstring''' def a_ ( self : int ) -> Optional[int]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" ) ), """references""": datasets.Sequence(datasets.Value("""string""" ) ), } ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[ """https://github.com/ns-moosavi/coval""", """https://www.aclweb.org/anthology/P16-1060""", """http://www.conll.cemantix.org/2012/data.html""", ] , ) def a_ ( self : str , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int=True , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : int=False ) -> Optional[int]: """simple docstring""" A__ = [ ("""mentions""", evaluator.mentions), ("""muc""", evaluator.muc), ("""bcub""", evaluator.b_cubed), ("""ceafe""", evaluator.ceafe), ("""lea""", 
evaluator.lea), ] if min_span: A__ = util.check_gold_parse_annotation(__lowerCAmelCase ) if not has_gold_parse: raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" ) # util.parse_key_file(key_file) # key_file = key_file + ".parsed" A__ = evaluate( key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , ) return score
247
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class __snake_case (unittest.TestCase ): @property def __a ( self: Optional[Any] ): torch.manual_seed(0 ) __lowerCamelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def __a ( self: Tuple ): __lowerCamelCase = self.dummy_uncond_unet __lowerCamelCase = PNDMScheduler() __lowerCamelCase = PNDMPipeline(unet=A_ , scheduler=A_ ) pndm.to(A_ ) pndm.set_progress_bar_config(disable=A_ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pndm(generator=A_ , num_inference_steps=20 , output_type="""numpy""" ).images __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pndm(generator=A_ , num_inference_steps=20 , output_type="""numpy""" , return_dict=A_ )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __lowerCamelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class __snake_case (unittest.TestCase ): def __a ( self: Tuple ): __lowerCamelCase = '''google/ddpm-cifar10-32''' __lowerCamelCase = UNetaDModel.from_pretrained(A_ ) __lowerCamelCase = PNDMScheduler() __lowerCamelCase = PNDMPipeline(unet=A_ , scheduler=A_ ) pndm.to(A_ ) pndm.set_progress_bar_config(disable=A_ ) __lowerCamelCase = torch.manual_seed(0 ) __lowerCamelCase = pndm(generator=A_ , output_type="""numpy""" ).images __lowerCamelCase = image[0, -3:, -3:, -1] assert image.shape 
== (1, 32, 32, 3) __lowerCamelCase = np.array([0.1_564, 0.14_645, 0.1_406, 0.14_715, 0.12_425, 0.14_045, 0.13_115, 0.12_175, 0.125] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
281
'''simple docstring''' import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class SCREAMING_SNAKE_CASE_ ( snake_case , snake_case , unittest.TestCase ): __a : Tuple = IFPipeline __a : List[Any] = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} __a : Union[str, Any] = TEXT_TO_IMAGE_BATCH_PARAMS __a : List[str] = PipelineTesterMixin.required_optional_params - {'''latents'''} def _snake_case ( self ) -> List[str]: '''simple docstring''' return self._get_dummy_components() def _snake_case ( self , lowercase , lowercase=0 ) -> int: '''simple docstring''' if str(lowercase ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(lowercase ) else: __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowercase ).manual_seed(lowercase ) __SCREAMING_SNAKE_CASE : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs def _snake_case ( self ) -> int: '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def _snake_case ( self ) -> Tuple: '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1e-1 ) def _snake_case ( self ) -> Dict: '''simple docstring''' 
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _snake_case ( self ) -> Any: '''simple docstring''' self._test_save_load_local() def _snake_case ( self ) -> Optional[int]: '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def _snake_case ( self ) -> Optional[Any]: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def _snake_case ( self ) -> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ) -> Tuple: '''simple docstring''' __SCREAMING_SNAKE_CASE : List[str] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Dict = IFSuperResolutionPipeline.from_pretrained( '''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.floataa , text_encoder=lowercase , tokenizer=lowercase ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('''cuda''' ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() __SCREAMING_SNAKE_CASE : str = None __SCREAMING_SNAKE_CASE : Tuple = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(lowercase , lowercase , lowercase , lowercase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img __SCREAMING_SNAKE_CASE : List[str] = IFImgaImgPipeline(**pipe_a.components ) __SCREAMING_SNAKE_CASE : str = 
IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(lowercase , lowercase , lowercase , lowercase ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting __SCREAMING_SNAKE_CASE : str = IFInpaintingPipeline(**pipe_a.components ) __SCREAMING_SNAKE_CASE : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(lowercase , lowercase , lowercase , lowercase ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Any: '''simple docstring''' _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : List[Any] = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , num_inference_steps=2 , generator=lowercase , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : List[str] = output.images[0] assert image.shape == (6_4, 6_4, 3) __SCREAMING_SNAKE_CASE : str = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 __SCREAMING_SNAKE_CASE : Optional[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Any = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : Dict = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , generator=lowercase , num_inference_steps=2 , 
output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Dict = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) __SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 __SCREAMING_SNAKE_CASE : Any = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Union[str, Any]: '''simple docstring''' _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , num_inference_steps=2 , generator=lowercase , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) __SCREAMING_SNAKE_CASE : str = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 __SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : Tuple = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , original_image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Tuple = 
output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) __SCREAMING_SNAKE_CASE : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 __SCREAMING_SNAKE_CASE : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) def _snake_case ( self , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]: '''simple docstring''' _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = pipe_a( prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , mask_image=lowercase , num_inference_steps=2 , generator=lowercase , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Optional[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) __SCREAMING_SNAKE_CASE : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 __SCREAMING_SNAKE_CASE : Any = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) # pipeline 2 _start_torch_memory_measurement() __SCREAMING_SNAKE_CASE : Dict = torch.Generator(device='''cpu''' ).manual_seed(0 ) __SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(lowercase ) __SCREAMING_SNAKE_CASE : Dict = pipe_a( 
prompt_embeds=lowercase , negative_prompt_embeds=lowercase , image=lowercase , mask_image=lowercase , original_image=lowercase , generator=lowercase , num_inference_steps=2 , output_type='''np''' , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) __SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 __SCREAMING_SNAKE_CASE : Dict = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' ) assert_mean_pixel_difference(lowercase , lowercase ) def A_ ( ) -> List[str]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
158
0
"""Stale-bot for the huggingface/diffusers issue tracker.

Intended to run on a schedule (e.g. a GitHub Actions cron job). Requires a
``GITHUB_TOKEN`` environment variable with write access to the repository.
"""
import os
from datetime import datetime as dt

from github import Github

# Issues carrying any of these labels are exempt from the stale bot.
a : Tuple = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def __UpperCAmelCase() -> None:
    """Walk every open issue and close, stale-mark, or un-stale it.

    Policy (mirrors the comment bodies below):
      * 7 days of silence after a stale-bot comment -> close.
      * A human replied on a stale issue -> reopen / drop the label.
      * 23 days of inactivity on a month-old issue -> post the stale notice.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first. Fix: the original lambda's parameter did not
        # match the name used in its body, and `reverse` was an undefined name.
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            # NOTE(review): `get_labels()` yields Label objects; `"stale" in ...`
            # relies on PyGithub's equality semantics — confirm against upstream.
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    # Fix: the original guard called an undefined `main()`.
    __UpperCAmelCase()
680
"""Lazy import shim for the GPT-BigCode model (standard `transformers` pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Maps submodule name -> list of public names it provides. Fix: the original
# code referenced `_import_structure` in `_LazyModule(...)` but never defined
# it, and the modeling-class list was assigned to a throwaway name and dropped.
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
680
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase : List[Any] = {"configuration_timm_backbone": ["TimmBackboneConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[Any] = ["TimmBackbone"] if TYPE_CHECKING: from .configuration_timm_backbone import TimmBackboneConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_timm_backbone import TimmBackbone else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
690
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCamelCase__ ( unittest.TestCase ): def __a ( self : Union[str, Any] ): A = torch.nn.Linear(10 , 10 ) A = torch.optim.SGD(model.parameters() , 0.1 ) A = Accelerator() A = accelerator.prepare(_lowercase ) try: pickle.loads(pickle.dumps(_lowercase ) ) except Exception as e: self.fail(f'Accelerated optimizer pickling failed with {e}' ) AcceleratorState._reset_state()
690
1
def lowerCAmelCase(lowerCAmelCase_=28_123) -> int:
    """Project Euler 23: sum of all positive integers <= `lowerCAmelCase_` that
    cannot be written as the sum of two abundant numbers.

    An abundant number is one whose proper divisors sum to more than itself.
    Fixes over the original: the body referenced undefined names (`limit`,
    `sum_divs`, `abundants`, `res`), added the *limit* instead of `n` to the
    abundant set, annotated the int return as `str`, and the __main__ guard
    called an undefined `solution()`.
    """
    limit = lowerCAmelCase_
    # sum_divs[n] accumulates the sum of proper divisors of n via a sieve over
    # divisor pairs (i, k) with i < k; every n >= 1 starts with divisor 1.
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i  # perfect square: count the repeated divisor once
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        # n is counted unless it splits as a + b with both a, b abundant.
        if not any((n - a) in abundants for a in abundants):
            res += n
    return res


if __name__ == "__main__":
    print(lowerCAmelCase())
619
"""Download a user's recent tweets (up to the API's ~3200 cap) into a CSV file."""
import csv

import tweepy

# Twitter API credentials — fill these in before running. Fix: the original
# assigned all four values to a single shared name, so the OAuth calls below
# referenced credentials that did not exist.
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def lowerCAmelCase(lowerCAmelCase_) -> None:
    """Fetch all available tweets of `lowerCAmelCase_` (a screen name) and write
    them to ``new_<screen_name>_tweets.csv`` with columns id, created_at, text.
    """
    screen_name = lowerCAmelCase_

    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    alltweets.extend(new_tweets)
    if not alltweets:
        # Nothing to paginate or write for an empty timeline.
        print(f"no tweets found for {screen_name}")
        return

    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        alltweets.extend(new_tweets)
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)


if __name__ == "__main__":
    # pass in the username of the account you want to download
    lowerCAmelCase("FirePing32")
619
1
"""Build and save a tiny random FSMT en-de model for test fixtures.

Fix: the original assigned every value to the single name `UpperCAmelCase_`
while the code referenced `mname`, `config`, `tiny_model`, `batch`, `outputs`
and `mname_tiny`, none of which were ever defined.
"""
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"
tokenizer = FSMTTokenizer.from_pretrained(mname)

# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
# Shrink every dimension to the minimum so the checkpoint is tiny.
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test that a forward pass runs.
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
2
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets __UpperCamelCase = '''\ @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' __UpperCamelCase = '''\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. 
See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. ''' __UpperCamelCase = ''' Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: \'score\' (float): The chrF (chrF++) score, \'char_order\' (int): The character n-gram order, \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, \'beta\' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... 
lowercase=True) >>> print(results) {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def __A ( self ) -> List[str]: if version.parse(scb.__version__ ) < version.parse('1.4.12' ): raise ImportWarning( 'To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n' 'You can install it with `pip install "sacrebleu>=1.4.12"`.' ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/mjpost/sacreBLEU#chrf--chrf' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Sequence(datasets.Value('string' , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/mjpost/sacreBLEU#chrf--chrf'] , reference_urls=[ 'https://github.com/m-popovic/chrF', ] , ) def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = CHRF.CHAR_ORDER , lowerCAmelCase__ = CHRF.WORD_ORDER , lowerCAmelCase__ = CHRF.BETA , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , ) -> Optional[Any]: SCREAMING_SNAKE_CASE = len(references[0] ) if any(len(lowerCAmelCase__ ) != references_per_prediction for refs in references ): raise ValueError('Sacrebleu requires the same number of references for each prediction' ) SCREAMING_SNAKE_CASE = [[refs[i] for refs in references] for i in range(lowerCAmelCase__ )] SCREAMING_SNAKE_CASE = CHRF(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE = sb_chrf.corpus_score(lowerCAmelCase__ , lowerCAmelCase__ ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
247
0
"""Lazy import shim for the RemBERT model (standard `transformers` pattern)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Maps submodule name -> list of public names. Fix: the original referenced
# `_import_structure` in `_LazyModule(...)` but never defined it, and each
# conditional name list was assigned to a throwaway variable and dropped.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )
else:
    import sys

    # Install a lazy proxy so importing this package stays cheap.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
580
"""Unit tests for the BLIP-2 processor (tokenizer + image processor wrapper)."""
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast


@require_vision
class lowercase_(unittest.TestCase):
    """Fix: every method was named `UpperCamelCase`, so each definition shadowed
    the previous one (unittest would discover nothing), while the bodies called
    `self.get_tokenizer` / `self.get_image_processor` / `self.prepare_image_inputs`
    which were never defined; several call sites also passed the undefined name
    `lowercase_` where a literal belonged. Restored the conventional names and
    literals."""

    def setUp(self):
        # Persist a processor to a temp dir so it can be reloaded in tests.
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-last uint8)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        # NOTE(review): `do_normalize=False` reconstructed from the upstream
        # test; the obfuscated source had an undefined name here.
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=None)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
580
1