# Tests for the PyTorch OpenAI GPT model.
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
        OpenAIGPTConfig,
        OpenAIGPTDoubleHeadsModel,
        OpenAIGPTForSequenceClassification,
        OpenAIGPTLMHeadModel,
        OpenAIGPTModel,
    )


class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict


@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477,
            244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
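# How these tests are typically exercised (assumption: the module lives under a transformers
# checkout so the relative `...test_*` imports resolve; RUN_SLOW=1 opts in to the @slow tests):
#
#   pytest tests/models/openai/test_modeling_openai.py -k "openai_gpt_model"
#   RUN_SLOW=1 pytest tests/models/openai/test_modeling_openai.py -k "lm_generate"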
# Image processor for LayoutLMv2-style document models: optional Tesseract OCR for words and
# normalized boxes, plus resize and RGB -> BGR channel flipping (as Detectron2 expects).
from typing import Dict, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends


if is_vision_available():
    import PIL

# soft dependency
if is_pytesseract_available():
    import pytesseract

logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    # scale absolute pixel coordinates to the 0-1000 grid used by LayoutLM models
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
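# A minimal usage sketch for the processor above. Assumptions: "document.png" is a local
# sample file, and pytesseract plus the Tesseract binary are installed (required when
# apply_ocr=True).
if __name__ == "__main__":
    from PIL import Image

    processor = LayoutLMv2ImageProcessor(apply_ocr=True)
    encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="np")
    print(encoding["pixel_values"].shape)  # (1, 3, 224, 224), channels flipped to BGR
    print(encoding["words"][0][:5])        # first OCR'd words of the first image
    print(encoding["boxes"][0][:5])        # matching boxes, normalized to a 0-1000 grid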
# Feature extractor for the EnCodec audio codec: validates mono/stereo inputs and pads or
# truncates raw audio, aligning lengths to the model's chunking when it is configured.
from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging


logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
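# A short usage sketch for the extractor above; the one-second 440 Hz sine wave is synthetic
# example data, and the shorter second clip demonstrates batch padding.
if __name__ == "__main__":
    audio = np.sin(2 * np.pi * 440 * np.arange(24000) / 24000).astype(np.float32)
    extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000, padding_value=0.0)
    batch = extractor(raw_audio=[audio, audio[:12000]], sampling_rate=24000, return_tensors="np")
    print(batch["input_values"].shape)  # (2, 1, 24000) -- shorter clip padded to the longest
    print(batch["padding_mask"].shape)  # (2, 24000), zeros over the padded region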
# Image-processing tests for DETA: default-property checks, output shapes for PIL, numpy and
# torch inputs, and slow integration tests against COCO detection/panoptic fixtures.
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor


class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # computes the height and width expected after `shortest_edge` resizing
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
# Build a smaller "student" seq2seq model by copying a subset of layers from a "teacher"
# (used in the transformers seq2seq distillation examples).
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps  num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps  num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps  num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
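# A hedged usage sketch (the module filename and "student_bart" save path are illustrative;
# downloading the teacher checkpoint needs network access). Via the fire CLI above:
#
#   python make_student.py facebook/bart-large-cnn --save_path student_bart --e 6 --d 3
#
# or programmatically, building a 6-encoder/3-decoder-layer student from a 12/12 BART teacher:
#
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student_bart", e=6, d=3
#   )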
"""
Sequence feature extraction class for common feature extractors to preprocess sequences.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy


logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it to a dict of lists, which also lets this
        # method serve as a collate_fn in a PyTorch DataLoader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
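# A minimal padding sketch for SequenceFeatureExtractor.pad. Assumption: a tiny concrete
# subclass, since the base class leaves model_input_names to the model-specific extractor.
if __name__ == "__main__":

    class ToyExtractor(SequenceFeatureExtractor):
        model_input_names = ["input_values"]

    toy = ToyExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
    batch = toy.pad(
        {"input_values": [np.ones(5, dtype=np.float32), np.ones(3, dtype=np.float32)]},
        padding=True,
        return_tensors="np",
    )
    print(batch["input_values"].shape)  # (2, 5) -- the shorter sequence is padded with 0.0
    print(batch["attention_mask"][1])   # [1 1 1 0 0]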
# Convert an original FLAVA pretraining checkpoint (plus its DALL-E codebook) to the
# Hugging Face FlavaForPreTraining format.
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
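# A hedged invocation sketch; the script filename and the paths below are placeholders,
# while the flags come from the argparse definition above:
#
#   python convert_flava_checkpoint.py \
#       --checkpoint_path /path/to/flava_full.pt \
#       --codebook_path /path/to/flava_codebook.pt \
#       --pytorch_dump_folder_path ./flava-hf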
# Minimum cost to travel on all given days of a year, choosing among 1-day, 7-day and
# 30-day passes (memoized dynamic programming over the calendar).
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        # cheapest of: buy a 1-day, 7-day or 30-day pass starting today
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
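# Illustrative check (the classic example for this problem): days [1, 4, 6, 7, 8, 20] with
# pass costs [2, 7, 15] should yield 11 -- a 7-day pass covering days 4-8 plus 1-day passes
# for days 1 and 20:
#
#   >>> mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15])
#   11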
# Image-processing tests for EfficientFormer, which ships with the stock ViTImageProcessor
# (fixed height/width resize plus normalization).
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
718
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Primality test exploiting the fact that every prime > 3 has the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All remaining prime candidates are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Return the first n odd composites that cannot be written as a prime plus twice a square."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rest = odd_composites[num] - 2 * i * i
            if is_prime(rest):
                break
            i += 1
        else:
            # no decomposition prime + 2*i^2 was found
            list_nums.append(odd_composites[num])

        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Project Euler 46: smallest odd composite violating Goldbach's other conjecture."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
647
0
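For reference, a minimal sketch of the input-handling contract the image-processor test cell above exercises: a single ViTImageProcessor call accepts PIL images (and, per the other test methods, NumPy arrays or torch tensors) and resizes everything to the configured height and width. The 30x40 input and 18x18 target are illustrative values, not part of the dataset row; torch, Pillow, and transformers are assumed installed.

import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

# Same 18x18 target size the tester class above configures.
processor = ViTImageProcessor(size={"height": 18, "width": 18})

# Arbitrary 30x40 RGB image standing in for prepare_image_inputs().
image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))

batch = processor(images=[image], return_tensors="pt")
assert batch.pixel_values.shape == (1, 3, 18, 18)  # (batch, channels, H, W)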
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            # a negative factor swaps which running product is largest
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
719
import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class lowercase ( unittest.TestCase ): def __init__( self : Union[str, Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[Any]=7 , _UpperCamelCase : Any=3 , _UpperCamelCase : str=18 , _UpperCamelCase : Tuple=30 , _UpperCamelCase : Optional[int]=400 , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : int=True , _UpperCamelCase : Optional[int]=[0.5, 0.5, 0.5] , _UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = size if size is not None else {"height": 18, "width": 18} SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = num_channels SCREAMING_SNAKE_CASE = image_size SCREAMING_SNAKE_CASE = min_resolution SCREAMING_SNAKE_CASE = max_resolution SCREAMING_SNAKE_CASE = do_resize SCREAMING_SNAKE_CASE = size SCREAMING_SNAKE_CASE = do_normalize SCREAMING_SNAKE_CASE = image_mean SCREAMING_SNAKE_CASE = image_std def __snake_case( self : List[Any] ) -> Any: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowercase ( a , unittest.TestCase ): lowercase__ : Any = DPTImageProcessor if is_vision_available() else None def __snake_case( self : List[str] ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE = DPTImageProcessingTester(self ) @property def __snake_case( self : List[Any] ) -> List[str]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __snake_case( self : str ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCamelCase , "image_mean" ) ) self.assertTrue(hasattr(_UpperCamelCase , "image_std" ) ) self.assertTrue(hasattr(_UpperCamelCase , "do_normalize" ) ) self.assertTrue(hasattr(_UpperCamelCase , "do_resize" ) ) self.assertTrue(hasattr(_UpperCamelCase , "size" ) ) def __snake_case( self : Dict ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def __snake_case( self : Union[str, Any] ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PIL images SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , Image.Image ) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE = 
image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __snake_case( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , np.ndarray ) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def __snake_case( self : Optional[Any] ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase ) for image in image_inputs: self.assertIsInstance(_UpperCamelCase , torch.Tensor ) # Test not batched input SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE = image_processing(_UpperCamelCase , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , )
647
0
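A quick usage check for the max_product_subarray routine restored above; the inputs are illustrative values chosen to show why the negative-number swap matters (the third case only works because the running minimum carries the product of two negatives).

print(max_product_subarray([2, 3, -2, 4]))   # 6  -> subarray [2, 3]
print(max_product_subarray([-2, 0, -1]))     # 0
print(max_product_subarray([-4, -3, -2]))    # 12 -> subarray [-4, -3]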
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin _lowerCamelCase : List[str] = random.Random() if is_torch_available(): import torch def __lowerCamelCase (UpperCAmelCase__ : Dict , UpperCAmelCase__ : str=1.0 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Tuple=None ): if rng is None: SCREAMING_SNAKE_CASE = global_rng SCREAMING_SNAKE_CASE = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowercase ( unittest.TestCase ): def __init__( self : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int=7 , _UpperCamelCase : Optional[Any]=400 , _UpperCamelCase : List[str]=2_000 , _UpperCamelCase : List[str]=1 , _UpperCamelCase : List[str]=0.0 , _UpperCamelCase : Optional[Any]=16_000 , _UpperCamelCase : str=True , _UpperCamelCase : int=True , ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = parent SCREAMING_SNAKE_CASE = batch_size SCREAMING_SNAKE_CASE = min_seq_length SCREAMING_SNAKE_CASE = max_seq_length SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) SCREAMING_SNAKE_CASE = feature_size SCREAMING_SNAKE_CASE = padding_value SCREAMING_SNAKE_CASE = sampling_rate SCREAMING_SNAKE_CASE = return_attention_mask SCREAMING_SNAKE_CASE = do_normalize def __snake_case( self : Dict ) -> str: '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __snake_case( self : int , _UpperCamelCase : Union[str, Any]=False , _UpperCamelCase : Dict=False ) -> Tuple: '''simple docstring''' def _flatten(_UpperCamelCase : int ): return list(itertools.chain(*_UpperCamelCase ) ) if equal_length: SCREAMING_SNAKE_CASE = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size SCREAMING_SNAKE_CASE = [ _flatten(floats_list((x, self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowercase ( a , unittest.TestCase ): lowercase__ : List[Any] = ASTFeatureExtractor def __snake_case( self : Dict ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = ASTFeatureExtractionTester(self ) def __snake_case( self : Optional[Any] ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )] SCREAMING_SNAKE_CASE = [np.asarray(_UpperCamelCase ) for speech_input in speech_inputs] # Test not batched input SCREAMING_SNAKE_CASE = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values SCREAMING_SNAKE_CASE = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) ) # Test batched SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , padding=_UpperCamelCase , 
return_tensors="np" ).input_values SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , padding=_UpperCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ): self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)] SCREAMING_SNAKE_CASE = np.asarray(_UpperCamelCase ) SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , return_tensors="np" ).input_values SCREAMING_SNAKE_CASE = feat_extract(_UpperCamelCase , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(_UpperCamelCase , _UpperCamelCase ): self.assertTrue(np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 ) ) @require_torch def __snake_case( self : str ) -> Dict: '''simple docstring''' import torch SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) SCREAMING_SNAKE_CASE = np.random.rand(100 ).astype(np.floataa ) SCREAMING_SNAKE_CASE = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: SCREAMING_SNAKE_CASE = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) SCREAMING_SNAKE_CASE = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __snake_case( self : Optional[int] , _UpperCamelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' from datasets import load_dataset SCREAMING_SNAKE_CASE = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech SCREAMING_SNAKE_CASE = ds.sort("id" ).select(range(_UpperCamelCase ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def __snake_case( self : List[str] ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = torch.tensor( [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6, -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3, -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6, -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] ) # fmt: on SCREAMING_SNAKE_CASE = self._load_datasamples(1 ) SCREAMING_SNAKE_CASE = ASTFeatureExtractor() SCREAMING_SNAKE_CASE = feature_extractor(_UpperCamelCase , return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1_024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , _UpperCamelCase , atol=1e-4 ) )
720
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD torch.set_grad_enabled(False) def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict=False ): SCREAMING_SNAKE_CASE = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F"module.blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") ) rename_keys.append((F"module.blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") ) rename_keys.append( (F"module.blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") ) rename_keys.append((F"module.blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") ) rename_keys.append((F"module.blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") ) rename_keys.append((F"module.blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") ) # projection layer + position embeddings rename_keys.extend( [ ("module.cls_token", "vit.embeddings.cls_token"), ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"), ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"), ("module.pos_embed", "vit.embeddings.position_embeddings"), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("module.norm.weight", "layernorm.weight"), ("module.norm.bias", "layernorm.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" SCREAMING_SNAKE_CASE = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) return rename_keys def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : str=False ): for i in range(config.num_hidden_layers ): if base_model: SCREAMING_SNAKE_CASE = "" else: SCREAMING_SNAKE_CASE = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.weight" ) SCREAMING_SNAKE_CASE = state_dict.pop(F"module.blocks.{i}.attn.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict SCREAMING_SNAKE_CASE = in_proj_weight[ : config.hidden_size, : ] SCREAMING_SNAKE_CASE = in_proj_bias[: config.hidden_size] SCREAMING_SNAKE_CASE = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] SCREAMING_SNAKE_CASE = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] SCREAMING_SNAKE_CASE = in_proj_weight[ -config.hidden_size :, : ] SCREAMING_SNAKE_CASE = in_proj_bias[-config.hidden_size :] def __lowerCamelCase (UpperCAmelCase__ : Tuple ): SCREAMING_SNAKE_CASE = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ ) def __lowerCamelCase (UpperCAmelCase__ : Any ): # projection head is used in the self-supervised pre-training in MSN, # for downstream task it's not needed. SCREAMING_SNAKE_CASE = [ "module.fc.fc1.weight", "module.fc.fc1.bias", "module.fc.bn1.weight", "module.fc.bn1.bias", "module.fc.bn1.running_mean", "module.fc.bn1.running_var", "module.fc.bn1.num_batches_tracked", "module.fc.fc2.weight", "module.fc.fc2.bias", "module.fc.bn2.weight", "module.fc.bn2.bias", "module.fc.bn2.running_mean", "module.fc.bn2.running_var", "module.fc.bn2.num_batches_tracked", "module.fc.fc3.weight", "module.fc.fc3.bias", ] for k in ignore_keys: state_dict.pop(UpperCAmelCase__ , UpperCAmelCase__ ) def __lowerCamelCase (UpperCAmelCase__ : int , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict ): SCREAMING_SNAKE_CASE = dct.pop(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE = val def __lowerCamelCase (UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str ): SCREAMING_SNAKE_CASE = ViTMSNConfig() SCREAMING_SNAKE_CASE = 1_0_0_0 SCREAMING_SNAKE_CASE = "datasets/huggingface/label-files" SCREAMING_SNAKE_CASE = "imagenet-1k-id2label.json" SCREAMING_SNAKE_CASE = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ ) , "r" ) ) SCREAMING_SNAKE_CASE = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()} SCREAMING_SNAKE_CASE = idalabel SCREAMING_SNAKE_CASE = {v: k for k, v in idalabel.items()} if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE = 3_8_4 SCREAMING_SNAKE_CASE = 1_5_3_6 SCREAMING_SNAKE_CASE = 6 elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE = 1_0_2_4 SCREAMING_SNAKE_CASE = 4_0_9_6 SCREAMING_SNAKE_CASE = 2_4 SCREAMING_SNAKE_CASE = 1_6 SCREAMING_SNAKE_CASE = 0.1 elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE = 4 elif "l7" in checkpoint_url: SCREAMING_SNAKE_CASE = 7 SCREAMING_SNAKE_CASE = 1_0_2_4 SCREAMING_SNAKE_CASE = 4_0_9_6 SCREAMING_SNAKE_CASE = 2_4 SCREAMING_SNAKE_CASE = 1_6 SCREAMING_SNAKE_CASE = 0.1 SCREAMING_SNAKE_CASE = ViTMSNModel(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(UpperCAmelCase__ , map_location="cpu" )["target_encoder"] SCREAMING_SNAKE_CASE = ViTImageProcessor(size=config.image_size ) remove_projection_head(UpperCAmelCase__ ) SCREAMING_SNAKE_CASE = create_rename_keys(UpperCAmelCase__ , base_model=UpperCAmelCase__ ) for src, dest in rename_keys: rename_key(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) read_in_q_k_v(UpperCAmelCase__ , UpperCAmelCase__ , base_model=UpperCAmelCase__ ) model.load_state_dict(UpperCAmelCase__ ) model.eval() SCREAMING_SNAKE_CASE = "http://images.cocodataset.org/val2017/000000039769.jpg" SCREAMING_SNAKE_CASE = 
Image.open(requests.get(UpperCAmelCase__ , stream=UpperCAmelCase__ ).raw ) SCREAMING_SNAKE_CASE = ViTImageProcessor( size=config.image_size , image_mean=UpperCAmelCase__ , image_std=UpperCAmelCase__ ) SCREAMING_SNAKE_CASE = image_processor(images=UpperCAmelCase__ , return_tensors="pt" ) # forward pass torch.manual_seed(2 ) SCREAMING_SNAKE_CASE = model(**UpperCAmelCase__ ) SCREAMING_SNAKE_CASE = outputs.last_hidden_state # The following Colab Notebook was used to generate these outputs: # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb if "s16" in checkpoint_url: SCREAMING_SNAKE_CASE = torch.tensor([[-1.0915, -1.4876, -1.1809]] ) elif "b16" in checkpoint_url: SCREAMING_SNAKE_CASE = torch.tensor([[14.2889, -18.9045, 11.7281]] ) elif "l16" in checkpoint_url: SCREAMING_SNAKE_CASE = torch.tensor([[41.5028, -22.8681, 45.6475]] ) elif "b4" in checkpoint_url: SCREAMING_SNAKE_CASE = torch.tensor([[-4.3868, 5.2932, -0.4137]] ) else: SCREAMING_SNAKE_CASE = torch.tensor([[-0.1792, -0.6465, 2.4263]] ) # verify logits assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCAmelCase__ , atol=1e-4 ) print(F"Saving model to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCAmelCase__ ) print(F"Saving image processor to {pytorch_dump_folder_path}" ) image_processor.save_pretrained(UpperCAmelCase__ ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) _lowerCamelCase : Optional[Any] = parser.parse_args() convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
647
0
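The read_in_q_k_v step of the ViT-MSN conversion above boils down to slicing one fused (3 * hidden, hidden) projection matrix into separate query/key/value weights. A self-contained sketch with a toy hidden size (the real checkpoints use 384/768/1024, and the script repeats this per encoder layer):

import torch

hidden_size = 8  # toy value for illustration only
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)

q_weight = in_proj_weight[:hidden_size, :]
k_weight = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_weight = in_proj_weight[-hidden_size:, :]

# the three slices tile the fused matrix exactly, in q/k/v order
assert torch.equal(torch.cat([q_weight, k_weight, v_weight], dim=0), in_proj_weight)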
def solution(numerator: int = 1, digit: int = 1_000) -> int:
    """Project Euler 26: find d <= `digit` for which numerator/d has the longest recurring decimal cycle."""
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                # remainder repeats: the cycle length is the number of remainders seen
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
721
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCamelCase : Dict = logging.get_logger(__name__) _lowerCamelCase : List[Any] = '''▁''' _lowerCamelCase : Optional[int] = {'''vocab_file''': '''prophetnet.tokenizer'''} _lowerCamelCase : str = { '''vocab_file''': { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer''' ), } } _lowerCamelCase : Optional[Any] = { '''microsoft/xprophetnet-large-wiki100-cased''': {'''do_lower_case''': False}, } _lowerCamelCase : Optional[Any] = { '''microsoft/xprophetnet-large-wiki100-cased''': 5_12, } def __lowerCamelCase (UpperCAmelCase__ : Optional[Any] ): SCREAMING_SNAKE_CASE = collections.OrderedDict() with open(UpperCAmelCase__ , "r" , encoding="utf-8" ) as reader: SCREAMING_SNAKE_CASE = reader.readlines() for index, token in enumerate(UpperCAmelCase__ ): SCREAMING_SNAKE_CASE = token.rstrip("\n" ) SCREAMING_SNAKE_CASE = index return vocab class lowercase ( a ): lowercase__ : Optional[int] = VOCAB_FILES_NAMES lowercase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP lowercase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase__ : Any = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Dict="[SEP]" , _UpperCamelCase : Tuple="[UNK]" , _UpperCamelCase : Dict="[PAD]" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : Optional[Any]="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : Dict , ) -> None: '''simple docstring''' SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , sep_token=_UpperCamelCase , unk_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , ) try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_UpperCamelCase ) ) SCREAMING_SNAKE_CASE = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab SCREAMING_SNAKE_CASE = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} for i in range(10 ): SCREAMING_SNAKE_CASE = F"[unused{i}]" SCREAMING_SNAKE_CASE = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab SCREAMING_SNAKE_CASE = 12 SCREAMING_SNAKE_CASE = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(_UpperCamelCase ) def __getstate__( self : Dict ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE = self.__dict__.copy() SCREAMING_SNAKE_CASE = None return state def __setstate__( self : List[Any] , _UpperCamelCase : Union[str, Any] ) -> Dict: '''simple docstring''' SCREAMING_SNAKE_CASE = d try: import sentencepiece as spm except ImportError: logger.warning( "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" " pip install sentencepiece" ) raise # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): SCREAMING_SNAKE_CASE = {} SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __snake_case( self : Dict , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase ) if token_ids_a is None: return ([0] * len(_UpperCamelCase )) + [1] return ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1] def __snake_case( self : str , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' SCREAMING_SNAKE_CASE = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __snake_case( self : Dict ) -> Optional[Any]: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset def __snake_case( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __snake_case( self : Union[str, Any] , _UpperCamelCase : str ) -> str: '''simple docstring''' return self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase ) def __snake_case( self : Optional[Any] , _UpperCamelCase : List[Any] ) -> List[str]: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] SCREAMING_SNAKE_CASE = self.sp_model.PieceToId(_UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __snake_case( self : str , _UpperCamelCase : str ) -> int: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __snake_case( self : List[str] , _UpperCamelCase : Dict ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE = "".join(_UpperCamelCase ).replace(_UpperCamelCase , " " ).strip() return out_string def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> 
Tuple[str]: '''simple docstring''' if not os.path.isdir(_UpperCamelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return SCREAMING_SNAKE_CASE = os.path.join( _UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(_UpperCamelCase , "wb" ) as fi: SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto() fi.write(_UpperCamelCase ) return (out_vocab_file,) def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.sep_token_id] SCREAMING_SNAKE_CASE = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
647
0
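A worked example for the reciprocal-cycle search restored above: among denominators below 10, 1/7 has the longest recurring cycle, 0.(142857) of length 6, and the documented Project Euler answer for the default bound of 1000 is 983.

print(solution(1, 10))  # 7
print(solution())       # 983, the published Project Euler 26 answer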
from scipy.stats import spearmanr import datasets lowerCAmelCase__ = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ lowerCAmelCase__ = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ lowerCAmelCase__ = R"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def A__ ( self): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('float'), 'references': datasets.Value('float'), }) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , ) def A__ ( self , __snake_case , __snake_case , __snake_case=False): _UpperCamelCase : Any = spearmanr(__snake_case , __snake_case) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
648
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp lowerCAmelCase__ = 5 lowerCAmelCase__ = 1_0 @require_sentencepiece @require_tokenizers class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = SpeechaTextTokenizer a__ = False a__ = True def A__ ( self): super().setUp() _UpperCamelCase : Any = sp.SentencePieceProcessor() spm_model.Load(__snake_case) _UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))] _UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case)))) _UpperCamelCase : Tuple = Path(self.tmpdirname) save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file']) _UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def A__ ( self): _UpperCamelCase : str = '<pad>' _UpperCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case) def A__ ( self): _UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(vocab_keys[-1] , 'j') self.assertEqual(len(__snake_case) , 10_01) def A__ ( self): self.assertEqual(self.get_tokenizer().vocab_size , 10_01) def A__ ( self): _UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) _UpperCamelCase : List[str] = tokenizer.tokenize('This is a test') self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , ) _UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( __snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case) self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8]) _UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case) self.assertListEqual( __snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def A__ ( self): # fmt: off _UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 
31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class lowercase ( unittest.TestCase ): """simple docstring""" a__ = "valhalla/s2t_mustc_multilinguial_medium" a__ = "C'est trop cool" a__ = "Esto es genial" @classmethod def A__ ( cls): _UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def A__ ( self): self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11) def A__ ( self): self.assertEqual(self.tokenizer.vocab_size , 1_00_00) def A__ ( self): self.assertIn(__snake_case , self.tokenizer.all_special_ids) _UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2] _UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case) _UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case) self.assertEqual(__snake_case , __snake_case) self.assertNotIn(self.tokenizer.eos_token , __snake_case) def A__ ( self): _UpperCamelCase : Any = 'fr' _UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , __snake_case) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def A__ ( self): _UpperCamelCase : Union[str, Any] = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) _UpperCamelCase : List[str] = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
648
1
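The metric class in the row above is a thin wrapper over scipy; a direct equivalent of its docstring example, with no datasets machinery involved (scipy assumed installed):

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(rho)               # -0.7, matching the docstring example
print(round(pvalue, 2))  # 0.19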
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ = { """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ["""BloomTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""", """BloomForCausalLM""", """BloomModel""", """BloomPreTrainedModel""", """BloomForSequenceClassification""", """BloomForTokenClassification""", """BloomForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
648
import logging from transformers.configuration_utils import PretrainedConfig lowerCAmelCase__ = logging.getLogger(__name__) class lowercase ( _lowercase ): """simple docstring""" a__ = "masked_bert" def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ): super().__init__(pad_token_id=__snake_case , **__snake_case) _UpperCamelCase : List[Any] = vocab_size _UpperCamelCase : Union[str, Any] = hidden_size _UpperCamelCase : Optional[int] = num_hidden_layers _UpperCamelCase : Any = num_attention_heads _UpperCamelCase : int = hidden_act _UpperCamelCase : str = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : Any = attention_probs_dropout_prob _UpperCamelCase : Tuple = max_position_embeddings _UpperCamelCase : Dict = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : List[Any] = layer_norm_eps _UpperCamelCase : Tuple = pruning_method _UpperCamelCase : Tuple = mask_init _UpperCamelCase : Dict = mask_scale
648
1
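Sketch of the optional-dependency guard the Bloom __init__ above relies on: a backend's symbols are only registered in the lazy import structure when that backend is importable, so a torch-less install simply never sees the modeling classes. This uses only the helpers the file itself imports from transformers.utils.

from transformers.utils import OptionalDependencyNotAvailable, is_torch_available

_import_structure = {"configuration_bloom": ["BloomConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: the modeling symbols are simply not registered
else:
    _import_structure["modeling_bloom"] = ["BloomModel", "BloomForCausalLM"]

print(sorted(_import_structure))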
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowercase ( _lowercase ): """simple docstring""" a__ = "rwkv" a__ = {"max_position_embeddings": "context_length"} def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ): _UpperCamelCase : str = vocab_size _UpperCamelCase : int = context_length _UpperCamelCase : Tuple = hidden_size _UpperCamelCase : Tuple = num_hidden_layers _UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size _UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size _UpperCamelCase : Union[str, Any] = layer_norm_epsilon _UpperCamelCase : Dict = rescale_every _UpperCamelCase : Optional[Any] = use_cache _UpperCamelCase : str = bos_token_id _UpperCamelCase : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
648
import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow lowerCAmelCase__ = False class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self , __snake_case=32): set_seed(0) _UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3) _UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1) return model, optimizer @slow def A__ ( self): _UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable _UpperCamelCase : List[Any] = DDPMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) _UpperCamelCase : List[Any] = DDIMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0) _UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)] _UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)] _UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)] # train with a DDPM scheduler _UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample _UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM _UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample _UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5)) self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
648
1
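The two None defaults in the RWKV config above are derived fields: attention_hidden_size falls back to hidden_size, and intermediate_size to 4 * hidden_size. A quick check of that fallback, assuming a transformers version recent enough to export RwkvConfig:

from transformers import RwkvConfig

config = RwkvConfig(hidden_size=512, num_hidden_layers=4)
print(config.attention_hidden_size)  # 512   (falls back to hidden_size)
print(config.intermediate_size)      # 2048  (falls back to 4 * hidden_size)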
from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowercase ( _lowercase ): """simple docstring""" a__ = "" a__ = "hf-legacy" # "hf://"" is reserved for hffs def __init__( self , __snake_case = None , __snake_case = None , **__snake_case , ): super().__init__(self , **__snake_case) _UpperCamelCase : List[Any] = repo_info _UpperCamelCase : int = token _UpperCamelCase : int = None def A__ ( self): if self.dir_cache is None: _UpperCamelCase : Any = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes _UpperCamelCase : Union[str, Any] = { 'name': hf_file.rfilename, 'size': None, 'type': 'file', } self.dir_cache.update( { str(__snake_case): {'name': str(__snake_case), 'size': None, 'type': 'directory'} for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1] }) def A__ ( self , __snake_case , __snake_case = "rb" , **__snake_case , ): if not isinstance(self.repo_info , __snake_case): raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''') _UpperCamelCase : str = hf_hub_url(self.repo_info.id , __snake_case , revision=self.repo_info.sha) return fsspec.open( __snake_case , mode=__snake_case , headers=get_authentication_headers_for_url(__snake_case , use_auth_token=self.token) , client_kwargs={'trust_env': True} , ).open() def A__ ( self , __snake_case , **__snake_case): self._get_dirs() _UpperCamelCase : int = self._strip_protocol(__snake_case) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(__snake_case) def A__ ( self , __snake_case , __snake_case=False , **__snake_case): self._get_dirs() _UpperCamelCase : List[str] = PurePosixPath(path.strip('/')) _UpperCamelCase : Any = {} for p, f in self.dir_cache.items(): _UpperCamelCase : Any = PurePosixPath(p.strip('/')) _UpperCamelCase : Tuple = p.parent if root == path: _UpperCamelCase : Optional[Any] = f _UpperCamelCase : Optional[Any] = list(paths.values()) if detail: return out else: return sorted(f['name'] for f in out)
648
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowerCAmelCase__ = { """sample_size""": 3_2, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1_0_0_0, """block_out_channels""": [3_2, 6_4], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """sample_size""": 6_4, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1_0_0_0, """block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], """attention_head_dim""": 6_4, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """sample_size""": 2_5_6, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], """attention_head_dim""": 6_4, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """num_train_timesteps""": 4_0, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } lowerCAmelCase__ = { """num_train_timesteps""": 2_0_1, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } lowerCAmelCase__ = { """num_train_timesteps""": 1_5_1, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]: '''simple docstring''' if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str: '''simple docstring''' _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] _UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] _UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] _UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] _UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] _UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] _UpperCamelCase : Optional[Any] = 
checkpoint[F'''{old_prefix}.out_layers.3.weight'''] _UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: _UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int: '''simple docstring''' _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias'''] _UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Optional[Any] = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple: '''simple docstring''' _UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' ) _UpperCamelCase : Union[str, Any] = {} _UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight'] _UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias'] _UpperCamelCase : Dict = checkpoint['time_embed.2.weight'] _UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: _UpperCamelCase : List[str] = checkpoint['label_emb.weight'] _UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight'] _UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] _UpperCamelCase : Optional[int] = unet_config['down_block_types'] _UpperCamelCase : Optional[Any] = unet_config['layers_per_block'] _UpperCamelCase : Dict = unet_config['attention_head_dim'] _UpperCamelCase : List[str] = unet_config['block_out_channels'] _UpperCamelCase : str = 1 _UpperCamelCase : Optional[int] = channels_list[0] for i, layer_type in enumerate(UpperCAmelCase_ ): _UpperCamelCase : List[str] = channels_list[i] _UpperCamelCase : str = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(UpperCAmelCase_ ): _UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}''' _UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False _UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(UpperCAmelCase_ ): _UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}''' _UpperCamelCase : str = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False _UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ 
, UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) _UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}''' _UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1''' _UpperCamelCase : Dict = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0''' _UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 _UpperCamelCase : Tuple = current_channels # hardcoded the mid-block for now _UpperCamelCase : Any = 'mid_block.resnets.0' _UpperCamelCase : Optional[Any] = 'middle_block.0' _UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Optional[Any] = 'mid_block.attentions.0' _UpperCamelCase : Tuple = 'middle_block.1' _UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Tuple = 'mid_block.resnets.1' _UpperCamelCase : str = 'middle_block.2' _UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : List[Any] = 0 _UpperCamelCase : Optional[int] = unet_config['up_block_types'] for i, layer_type in enumerate(UpperCAmelCase_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}''' _UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0''' _UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0''' _UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1''' _UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}''' _UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0''' _UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) _UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}''' _UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1''' _UpperCamelCase : Optional[int] = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0''' _UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2''' _UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : List[Any] = checkpoint['out.0.weight'] _UpperCamelCase : str = checkpoint['out.0.bias'] _UpperCamelCase : int = checkpoint['out.2.weight'] _UpperCamelCase : List[Any] = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, 
help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = strabool(args.class_cond) lowerCAmelCase__ = os.path.basename(args.unet_path) print(f'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: lowerCAmelCase__ = TEST_UNET_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: lowerCAmelCase__ = None lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config) lowerCAmelCase__ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: lowerCAmelCase__ = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config) lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
648
1
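The attention converter in the row above splits a fused qkv 1x1-conv weight into separate q/k/v matrices with chunk() and then squeezes away the trailing conv dims. A small standalone sketch of that step (shapes illustrative, not taken from a real checkpoint):

import torch

channels = 4
qkv_weight = torch.randn(3 * channels, channels, 1, 1)  # fused q/k/v conv1x1 projection
# Split along the output dim, then drop the 1x1 conv dims -> three (C, C) matrices.
q_w, k_w, v_w = (t.squeeze(-1).squeeze(-1) for t in qkv_weight.chunk(3, dim=0))
print(q_w.shape, k_w.shape, v_w.shape)  # torch.Size([4, 4]) x 3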
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Count tile totals t <= t_limit that form a hollow square lamina in
    between 1 and n_limit distinct ways."""
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # keep the hole the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
648
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using the iterative form of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
648
1
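A quick sanity check for heaps() above (illustrative): Heap's algorithm must emit each of the 3! = 6 orderings of three items exactly once.

perms = heaps([1, 2, 3])
assert len(perms) == 6
assert set(perms) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}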
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """Simplest image scaling: each destination pixel copies its nearest source pixel."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
648
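The nested loop in process() above maps destination pixel (i, j) to source pixel (int(i * ratio_y), int(j * ratio_x)). The same mapping can be expressed with vectorized index arrays; a sketch, not part of the original file:

import numpy as np

src = np.arange(16, dtype=np.uint8).reshape(4, 4)
dst_h, dst_w = 8, 8
ys = (np.arange(dst_h) * (src.shape[0] / dst_h)).astype(int)  # nearest source row per dest row
xs = (np.arange(dst_w) * (src.shape[1] / dst_w)).astype(int)  # nearest source col per dest col
resized = src[np.ix_(ys, xs)]
print(resized.shape)  # (8, 8)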
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = [ ["""attention""", """attn"""], ["""encoder_attention""", """encoder_attn"""], ["""q_lin""", """q_proj"""], ["""k_lin""", """k_proj"""], ["""v_lin""", """v_proj"""], ["""out_lin""", """out_proj"""], ["""norm_embeddings""", """layernorm_embedding"""], ["""position_embeddings""", """embed_positions"""], ["""embeddings""", """embed_tokens"""], ["""ffn.lin""", """fc"""], ] def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]: '''simple docstring''' if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _UpperCamelCase : List[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ ) if k.startswith('encoder' ): _UpperCamelCase : Optional[Any] = k.replace('.attn' , '.self_attn' ) _UpperCamelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' ) _UpperCamelCase : Tuple = k.replace('norm2' , 'final_layer_norm' ) elif k.startswith('decoder' ): _UpperCamelCase : Any = k.replace('norm1' , 'self_attn_layer_norm' ) _UpperCamelCase : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' ) _UpperCamelCase : Tuple = k.replace('norm3' , 'final_layer_norm' ) return k def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase : Union[str, Any] = [ 'model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias', ] for k in keys: _UpperCamelCase : Optional[int] = sd.pop(UpperCAmelCase_ ) _UpperCamelCase : str = k.replace('layernorm_embedding' , 'layer_norm' ) assert new_k not in sd _UpperCamelCase : Tuple = v lowerCAmelCase__ = ["""START"""] @torch.no_grad() def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]: '''simple docstring''' _UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' ) _UpperCamelCase : int = model['model'] _UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ ) _UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ ) _UpperCamelCase : int = m.model.state_dict().keys() _UpperCamelCase : Union[str, Any] = [] _UpperCamelCase : int = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _UpperCamelCase : int = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(UpperCAmelCase_ ) m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) m.half() m.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""") parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""") parser.add_argument( """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use""" ) lowerCAmelCase__ = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
648
1
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
648
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""] lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only""" if is_tf_available(): class lowercase ( tf.keras.Model ): """simple docstring""" def __init__( self , __snake_case): super().__init__() _UpperCamelCase : List[Any] = tokenizer _UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case) _UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case) def A__ ( self , __snake_case): _UpperCamelCase : Any = self.tokenizer(__snake_case) _UpperCamelCase : Dict = self.bert(**__snake_case) return out["pooler_output"] @require_tf @require_tensorflow_text class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self): super().setUp() _UpperCamelCase : Optional[Any] = [ BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false _UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers) == len(self.tf_tokenizers) _UpperCamelCase : Optional[Any] = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] _UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1])) def A__ ( self): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers): for test_inputs in (self.test_sentences, self.paired_sentences): _UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest') _UpperCamelCase : Tuple = tf_tokenizer(__snake_case) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key])) @slow def A__ ( self): for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences) _UpperCamelCase : Optional[Any] = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key])) @slow def A__ ( self): for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Tuple = tf.function(__snake_case) for test_inputs in (self.test_sentences, self.paired_sentences): _UpperCamelCase : Optional[int] = tf.constant(__snake_case) _UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case) _UpperCamelCase : Tuple = tf_tokenizer(__snake_case) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == 
compiled_outputs[key])) @slow def A__ ( self): for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case) _UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences) _UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _UpperCamelCase : int = Path(__snake_case) / 'saved.model' model.save(__snake_case) _UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case) _UpperCamelCase : int = loaded_model(__snake_case) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
648
1
from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 lowerCAmelCase__ = { # 1536-bit 5: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 2048-bit 1_4: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AACAA68FFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 3072-bit 1_5: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64""" + """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7""" + """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B""" + """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31""" + """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 4096-bit 1_6: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64""" + """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7""" + """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B""" + """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31""" + """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7""" + """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA""" + """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6""" + """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED""" + 
"""1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9""" + """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199""" + """FFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 6144-bit 1_7: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08""" + """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B""" + """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9""" + """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6""" + """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8""" + """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C""" + """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718""" + """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D""" + """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D""" + """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226""" + """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC""" + """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26""" + """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB""" + """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2""" + """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127""" + """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492""" + """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406""" + """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918""" + """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151""" + """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03""" + """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F""" + """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA""" + """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B""" + """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632""" + """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E""" + """6DCC4024FFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, # 8192-bit 1_8: { """prime""": int( """FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1""" + """29024E088A67CC74020BBEA63B139B22514A08798E3404DD""" + """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245""" + """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED""" + """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D""" + """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F""" + """83655D23DCA3AD961C62F356208552BB9ED529077096966D""" + """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B""" + """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9""" + """DE2BCBF6955817183995497CEA956AE515D2261898FA0510""" + """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64""" + """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7""" + """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B""" + """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C""" + """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31""" + """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7""" + """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA""" + """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6""" + """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED""" + """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9""" + """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492""" + """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD""" + """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831""" + """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B""" + """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF""" + """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6""" + 
"""D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3""" + """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA""" + """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328""" + """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C""" + """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE""" + """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4""" + """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300""" + """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568""" + """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9""" + """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B""" + """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A""" + """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36""" + """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1""" + """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92""" + """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47""" + """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71""" + """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""", base=1_6, ), """generator""": 2, }, } class lowercase : """simple docstring""" def __init__( self , __snake_case = 14): if group not in primes: raise ValueError('Unsupported Group') _UpperCamelCase : Any = primes[group]['prime'] _UpperCamelCase : List[str] = primes[group]['generator'] _UpperCamelCase : List[Any] = int(hexlify(urandom(32)) , base=16) def A__ ( self): return hex(self.__private_key)[2:] def A__ ( self): _UpperCamelCase : int = pow(self.generator , self.__private_key , self.prime) return hex(__snake_case)[2:] def A__ ( self , __snake_case): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(__snake_case , (self.prime - 1) // 2 , self.prime) == 1 ) def A__ ( self , __snake_case): _UpperCamelCase : Any = int(__snake_case , base=16) if not self.is_valid_public_key(__snake_case): raise ValueError('Invalid public key') _UpperCamelCase : List[str] = pow(__snake_case , self.__private_key , self.prime) return shaaaa(str(__snake_case).encode()).hexdigest() @staticmethod def A__ ( __snake_case , __snake_case): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(__snake_case , (prime - 1) // 2 , __snake_case) == 1 ) @staticmethod def A__ ( __snake_case , __snake_case , __snake_case = 14): _UpperCamelCase : int = int(__snake_case , base=16) _UpperCamelCase : Union[str, Any] = int(__snake_case , base=16) _UpperCamelCase : List[Any] = primes[group]['prime'] if not DiffieHellman.is_valid_public_key_static(__snake_case , __snake_case): raise ValueError('Invalid public key') _UpperCamelCase : Optional[int] = pow(__snake_case , __snake_case , __snake_case) return shaaaa(str(__snake_case).encode()).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
648
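The DiffieHellman class above reduces to modular exponentiation over an RFC 3526 MODP group. A toy round trip with deliberately tiny, insecure parameters shows the invariant the class enforces (illustrative only; the real class uses the 1536-bit+ primes listed above and hashes the result with SHA-256):

# Both parties derive g**(a*b) mod p without ever exchanging a or b.
p, g = 23, 5                         # stand-ins for the MODP prime/generator
a, b = 6, 15                         # private keys
A, B = pow(g, a, p), pow(g, b, p)    # public keys
assert pow(B, a, p) == pow(A, b, p) == 2  # identical shared secret on both sides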
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ = { """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""], """tokenization_canine""": ["""CanineTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""", """CanineForMultipleChoice""", """CanineForQuestionAnswering""", """CanineForSequenceClassification""", """CanineForTokenClassification""", """CanineLayer""", """CanineModel""", """CaninePreTrainedModel""", """load_tf_weights_in_canine""", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
648
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { """configuration_distilbert""": [ """DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DistilBertConfig""", """DistilBertOnnxConfig""", ], """tokenization_distilbert""": ["""DistilBertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ["""DistilBertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """DistilBertForMaskedLM""", """DistilBertForMultipleChoice""", """DistilBertForQuestionAnswering""", """DistilBertForSequenceClassification""", """DistilBertForTokenClassification""", """DistilBertModel""", """DistilBertPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFDistilBertForMaskedLM""", """TFDistilBertForMultipleChoice""", """TFDistilBertForQuestionAnswering""", """TFDistilBertForSequenceClassification""", """TFDistilBertForTokenClassification""", """TFDistilBertMainLayer""", """TFDistilBertModel""", """TFDistilBertPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """FlaxDistilBertForMaskedLM""", """FlaxDistilBertForMultipleChoice""", """FlaxDistilBertForQuestionAnswering""", """FlaxDistilBertForSequenceClassification""", """FlaxDistilBertForTokenClassification""", """FlaxDistilBertModel""", """FlaxDistilBertPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_distilbert import ( DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertOnnxConfig, ) from .tokenization_distilbert import DistilBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_distilbert_fast import DistilBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, FlaxDistilBertPreTrainedModel, ) else: import sys 
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
648
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ): _UpperCamelCase : List[Any] = parent _UpperCamelCase : Dict = batch_size _UpperCamelCase : Optional[int] = image_size _UpperCamelCase : str = num_channels _UpperCamelCase : Optional[Any] = embeddings_size _UpperCamelCase : Tuple = hidden_sizes _UpperCamelCase : Dict = depths _UpperCamelCase : str = is_training _UpperCamelCase : Optional[int] = use_labels _UpperCamelCase : str = hidden_act _UpperCamelCase : Optional[int] = num_labels _UpperCamelCase : Optional[int] = scope _UpperCamelCase : Tuple = len(__snake_case) _UpperCamelCase : Dict = out_features _UpperCamelCase : Union[str, Any] = out_indices _UpperCamelCase : int = num_groups def A__ ( self): _UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCamelCase : str = None if self.use_labels: _UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels) _UpperCamelCase : str = self.get_config() return config, pixel_values, labels def A__ ( self): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : str = BitModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[Any] = model(__snake_case) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Dict = self.num_labels _UpperCamelCase : Dict = BitForImageClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Dict = model(__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , 
[self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None _UpperCamelCase : Any = None _UpperCamelCase : str = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Any = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def A__ ( self): _UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs _UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def A__ ( self): _UpperCamelCase : Dict = BitModelTester(self) _UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case) def A__ ( self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self): return @unittest.skip(reason='Bit does not output attentions') def A__ ( self): pass @unittest.skip(reason='Bit does not use inputs_embeds') def A__ ( self): pass @unittest.skip(reason='Bit does not support input and output embeddings') def A__ ( self): pass def A__ ( self): _UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : int = model_class(__snake_case) _UpperCamelCase : List[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : Optional[int] = [*signature.parameters.keys()] _UpperCamelCase : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __snake_case) def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case) def A__ ( self): _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__snake_case) def A__ ( self): _UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : Union[str, Any] = model_class(config=__snake_case) for name, module in model.named_modules(): if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) 
self.assertTrue( torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def A__ ( self): def check_hidden_states_output(__snake_case , __snake_case , __snake_case): _UpperCamelCase : str = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): _UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case)) _UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase : str = self.model_tester.num_stages self.assertEqual(len(__snake_case) , expected_num_stages + 1) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : List[str] = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _UpperCamelCase : Any = layer_type _UpperCamelCase : Tuple = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase : List[str] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) @unittest.skip(reason='Bit does not use feedforward chunking') def A__ ( self): pass def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case) @slow def A__ ( self): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case) self.assertIsNotNone(__snake_case) def lowerCamelCase_ ( ) -> Optional[int]: '''simple docstring''' _UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def A__ ( self): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def A__ ( self): _UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case) _UpperCamelCase : str = self.default_image_processor _UpperCamelCase : List[str] = prepare_img() _UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case) # forward pass with torch.no_grad(): _UpperCamelCase : Any = model(**__snake_case) # verify the logits _UpperCamelCase : Dict = torch.Size((1, 10_00)) self.assertEqual(outputs.logits.shape , __snake_case) _UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4)) @require_torch class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def A__ ( self): _UpperCamelCase : List[str] = BitModelTester(self)
648
1
from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image lowerCAmelCase__ = ["""text""", """image""", """audio"""] def lowerCamelCase_ ( UpperCAmelCase_ : List[str] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase : Dict = [] for input_type in input_types: if input_type == "text": inputs.append('Text input' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((5_1_2, 5_1_2) ) ) elif input_type == "audio": inputs.append(torch.ones(3_0_0_0 ) ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): inputs.append(create_inputs(UpperCAmelCase_ ) ) else: raise ValueError(F'''Invalid type requested: {input_type}''' ) return inputs def lowerCamelCase_ ( UpperCAmelCase_ : List ) -> str: '''simple docstring''' _UpperCamelCase : Optional[Any] = [] for output in outputs: if isinstance(UpperCAmelCase_ , (str, AgentText) ): output_types.append('text' ) elif isinstance(UpperCAmelCase_ , (Image.Image, AgentImage) ): output_types.append('image' ) elif isinstance(UpperCAmelCase_ , (torch.Tensor, AgentAudio) ): output_types.append('audio' ) else: raise ValueError(F'''Invalid output: {output}''' ) return output_types @is_tool_test class lowercase : """simple docstring""" def A__ ( self): self.assertTrue(hasattr(self.tool , 'inputs')) self.assertTrue(hasattr(self.tool , 'outputs')) _UpperCamelCase : str = self.tool.inputs for _input in inputs: if isinstance(_input , __snake_case): for __input in _input: self.assertTrue(__input in authorized_types) else: self.assertTrue(_input in authorized_types) _UpperCamelCase : Optional[int] = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types) def A__ ( self): _UpperCamelCase : int = create_inputs(self.tool.inputs) _UpperCamelCase : Any = self.tool(*__snake_case) # There is a single output if len(self.tool.outputs) == 1: _UpperCamelCase : Tuple = [outputs] self.assertListEqual(output_types(__snake_case) , self.tool.outputs) def A__ ( self): self.assertTrue(hasattr(self.tool , 'description')) self.assertTrue(hasattr(self.tool , 'default_checkpoint')) self.assertTrue(self.tool.description.startswith('This is a tool that')) def A__ ( self): _UpperCamelCase : str = create_inputs(self.tool.inputs) _UpperCamelCase : int = self.tool(*__snake_case) if not isinstance(__snake_case , __snake_case): _UpperCamelCase : Dict = [outputs] self.assertEqual(len(__snake_case) , len(self.tool.outputs)) for output, output_type in zip(__snake_case , self.tool.outputs): _UpperCamelCase : Any = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(__snake_case , __snake_case)) def A__ ( self): _UpperCamelCase : List[Any] = create_inputs(self.tool.inputs) _UpperCamelCase : List[str] = [] for _input, input_type in zip(__snake_case , self.tool.inputs): if isinstance(__snake_case , __snake_case): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type]) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input)) # Should not raise an error _UpperCamelCase : Optional[int] = self.tool(*__snake_case) if not isinstance(__snake_case , __snake_case): _UpperCamelCase : str = [outputs] self.assertEqual(len(__snake_case) , len(self.tool.outputs))
648
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration step `steps` times to the initial vector list."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each segment by four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
648
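A quick check of the 60-degree rotation used in iteration_step() above (illustrative): rotating the unit x-vector must reproduce VECTOR_2, the apex of the initial triangle.

import numpy

bump = rotate(numpy.array([1.0, 0.0]), 60)
print(numpy.round(bump, 7))  # [0.5       0.8660254]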
1
import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow lowerCAmelCase__ = False class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self , __snake_case=32): set_seed(0) _UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3) _UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1) return model, optimizer @slow def A__ ( self): _UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable _UpperCamelCase : List[Any] = DDPMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) _UpperCamelCase : List[Any] = DDIMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0) _UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)] _UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)] _UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)] # train with a DDPM scheduler _UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample _UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM _UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample _UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5)) self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
648
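The determinism test above hinges on scheduler.add_noise() producing the same noisy samples for the DDPM and DDIM schedulers given shared inputs. A minimal sketch of that call, assuming diffusers is installed (shapes illustrative):

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
clean = torch.randn(1, 3, 32, 32)
noise = torch.randn_like(clean)
timestep = torch.tensor([500])
# Forward-diffusion: sqrt(alpha_bar_t) * x0 + sqrt(1 - alpha_bar_t) * eps
noisy = scheduler.add_noise(clean, noise, timestep)
print(noisy.shape)  # torch.Size([1, 3, 32, 32])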
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join import _pytest from transformers.testing_utils import HfDoctestModule, HfDocTestParser # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. lowerCAmelCase__ = abspath(join(dirname(__file__), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Union[str, Any]: '''simple docstring''' config.addinivalue_line( 'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' ) config.addinivalue_line( 'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' ) config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' ) config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' ) config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' ) config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' ) def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ) -> List[Any]: '''simple docstring''' from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(UpperCAmelCase_ ) def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> List[Any]: '''simple docstring''' from transformers.testing_utils import pytest_terminal_summary_main _UpperCamelCase : str = terminalreporter.config.getoption('--make-reports' ) if make_reports: pytest_terminal_summary_main(UpperCAmelCase_ , id=UpperCAmelCase_ ) def lowerCamelCase_ ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict ) -> Tuple: '''simple docstring''' if exitstatus == 5: _UpperCamelCase : List[Any] = 0 # Doctest custom flag to ignore output. lowerCAmelCase__ = doctest.register_optionflag("""IGNORE_RESULT""") lowerCAmelCase__ = doctest.OutputChecker class lowercase ( _lowercase ): """simple docstring""" def A__ ( self , __snake_case , __snake_case , __snake_case): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self , __snake_case , __snake_case , __snake_case) lowerCAmelCase__ = CustomOutputChecker lowerCAmelCase__ = HfDoctestModule lowerCAmelCase__ = HfDocTestParser
648
1
import warnings


warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
648
"""
Digit-sum sequence: a(1) = 1 and a(n + 1) = a(n) + digitsum(a(n)).
The helpers below cache "jumps" over runs of terms so that very large
indices (the default is n = 10**15) can be reached quickly.
"""
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    # a_i is the digit array of the current term (least significant digit
    # first), split as a(i) = b * 10^k + c; ds_b is digitsum(b).
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]

                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    # adds addend to the digit array `digits`, starting at index k
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    # returns the n-th term of the sequence
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
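# A minimal sanity check for the restored file above. The brute-force helper is
# hypothetical (not part of the original source): it walks the recurrence
# directly for a small n and compares against the memoized solution().
def _brute_force_term(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a


assert solution(10) == _brute_force_term(10)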
648
1
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
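# Hedged usage sketch, kept as comments (the hub id comes from the vocab map
# above; the example sentence is illustrative):
# tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
# tok("Kraków jest piękny")  # input_ids wrapped in <s> ... </s> per the methods above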
648
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
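# Hedged usage sketch (standard PretrainedConfig behavior; the override values
# below are illustrative, not taken from this file):
config = ViTMAEConfig(image_size=192, mask_ratio=0.6)
assert config.hidden_size == 768  # encoder defaults are unchanged
assert config.mask_ratio == 0.6   # the override is applied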
648
1
import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Any: '''simple docstring''' _UpperCamelCase : Tuple = fname.split(os.path.sep )[-1] return re.search(R'^(.*)_\d+\.jpg$' , UpperCAmelCase_ ).groups()[0] class lowercase ( _lowercase ): """simple docstring""" def __init__( self , __snake_case , __snake_case=None , __snake_case=None): _UpperCamelCase : List[str] = file_names _UpperCamelCase : Union[str, Any] = image_transform _UpperCamelCase : Tuple = label_to_id def __len__( self): return len(self.file_names) def __getitem__( self , __snake_case): _UpperCamelCase : List[Any] = self.file_names[idx] _UpperCamelCase : List[Any] = PIL.Image.open(__snake_case) _UpperCamelCase : List[str] = raw_image.convert('RGB') if self.image_transform is not None: _UpperCamelCase : List[Any] = self.image_transform(__snake_case) _UpperCamelCase : Any = extract_label(__snake_case) if self.label_to_id is not None: _UpperCamelCase : Optional[int] = self.label_to_id[label] return {"image": image, "label": label} def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Dict: '''simple docstring''' if args.with_tracking: _UpperCamelCase : str = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir ) else: _UpperCamelCase : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCamelCase : List[str] = config['lr'] _UpperCamelCase : List[Any] = int(config['num_epochs'] ) _UpperCamelCase : Dict = int(config['seed'] ) _UpperCamelCase : Union[str, Any] = int(config['batch_size'] ) _UpperCamelCase : Optional[Any] = config['image_size'] if not isinstance(UpperCAmelCase_ , (list, tuple) ): _UpperCamelCase : List[str] = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , 'isdigit' ): if args.checkpointing_steps == "epoch": _UpperCamelCase : Dict = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): _UpperCamelCase : Dict = int(args.checkpointing_steps ) else: raise ValueError( F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' ) else: _UpperCamelCase : Dict = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: _UpperCamelCase : Dict = os.path.split(UpperCAmelCase_ )[-1].split('.' )[0] accelerator.init_trackers(UpperCAmelCase_ , UpperCAmelCase_ ) # Grab all the image filenames _UpperCamelCase : Tuple = [os.path.join(args.data_dir , UpperCAmelCase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )] # Build the label correspondences _UpperCamelCase : Union[str, Any] = [extract_label(UpperCAmelCase_ ) for fname in file_names] _UpperCamelCase : Union[str, Any] = list(set(UpperCAmelCase_ ) ) id_to_label.sort() _UpperCamelCase : List[str] = {lbl: i for i, lbl in enumerate(UpperCAmelCase_ )} # Set the seed before splitting the data. 
np.random.seed(UpperCAmelCase_ ) torch.manual_seed(UpperCAmelCase_ ) torch.cuda.manual_seed_all(UpperCAmelCase_ ) # Split our filenames between train and validation _UpperCamelCase : Optional[Any] = np.random.permutation(len(UpperCAmelCase_ ) ) _UpperCamelCase : Union[str, Any] = int(0.8 * len(UpperCAmelCase_ ) ) _UpperCamelCase : Optional[int] = random_perm[:cut] _UpperCamelCase : Tuple = random_perm[cut:] # For training we use a simple RandomResizedCrop _UpperCamelCase : Optional[int] = Compose([RandomResizedCrop(UpperCAmelCase_ , scale=(0.5, 1.0) ), ToTensor()] ) _UpperCamelCase : Union[str, Any] = PetsDataset( [file_names[i] for i in train_split] , image_transform=UpperCAmelCase_ , label_to_id=UpperCAmelCase_ ) # For evaluation, we use a deterministic Resize _UpperCamelCase : Optional[int] = Compose([Resize(UpperCAmelCase_ ), ToTensor()] ) _UpperCamelCase : Tuple = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCAmelCase_ , label_to_id=UpperCAmelCase_ ) # Instantiate dataloaders. _UpperCamelCase : Optional[Any] = DataLoader(UpperCAmelCase_ , shuffle=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , num_workers=4 ) _UpperCamelCase : int = DataLoader(UpperCAmelCase_ , shuffle=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCamelCase : Dict = create_model('resnet50d' , pretrained=UpperCAmelCase_ , num_classes=len(UpperCAmelCase_ ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCamelCase : Any = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): _UpperCamelCase : List[str] = False for param in model.get_classifier().parameters(): _UpperCamelCase : str = True # We normalize the batches of images to be a bit faster. _UpperCamelCase : int = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device ) _UpperCamelCase : List[Any] = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer _UpperCamelCase : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=lr / 2_5 ) # Instantiate learning rate scheduler _UpperCamelCase : List[Any] = OneCycleLR(optimizer=UpperCAmelCase_ , max_lr=UpperCAmelCase_ , epochs=UpperCAmelCase_ , steps_per_epoch=len(UpperCAmelCase_ ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = accelerator.prepare( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # We need to keep track of how many total steps we have iterated over _UpperCamelCase : List[Any] = 0 # We also need to keep track of the starting epoch so files are named properly _UpperCamelCase : Dict = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' ) accelerator.load_state(args.resume_from_checkpoint ) _UpperCamelCase : List[str] = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint _UpperCamelCase : Tuple = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) _UpperCamelCase : Optional[Any] = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` _UpperCamelCase : Union[str, Any] = os.path.splitext(UpperCAmelCase_ )[0] if "epoch" in training_difference: _UpperCamelCase : str = int(training_difference.replace('epoch_' , '' ) ) + 1 _UpperCamelCase : str = None else: _UpperCamelCase : int = int(training_difference.replace('step_' , '' ) ) _UpperCamelCase : Tuple = resume_step // len(UpperCAmelCase_ ) resume_step -= starting_epoch * len(UpperCAmelCase_ ) # Now we train the model for epoch in range(UpperCAmelCase_ , UpperCAmelCase_ ): model.train() if args.with_tracking: _UpperCamelCase : Optional[int] = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step _UpperCamelCase : Dict = accelerator.skip_first_batches(UpperCAmelCase_ , UpperCAmelCase_ ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader _UpperCamelCase : List[str] = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. _UpperCamelCase : Any = {k: v.to(accelerator.device ) for k, v in batch.items()} _UpperCamelCase : Union[str, Any] = (batch['image'] - mean) / std _UpperCamelCase : List[Any] = model(UpperCAmelCase_ ) _UpperCamelCase : Dict = torch.nn.functional.cross_entropy(UpperCAmelCase_ , batch['label'] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(UpperCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): _UpperCamelCase : Tuple = F'''step_{overall_step}''' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: _UpperCamelCase : List[str] = os.path.join(args.output_dir , UpperCAmelCase_ ) accelerator.save_state(UpperCAmelCase_ ) model.eval() _UpperCamelCase : Dict = 0 _UpperCamelCase : Any = 0 for step, batch in enumerate(UpperCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
_UpperCamelCase : List[Any] = {k: v.to(accelerator.device ) for k, v in batch.items()} _UpperCamelCase : Dict = (batch['image'] - mean) / std with torch.no_grad(): _UpperCamelCase : Optional[Any] = model(UpperCAmelCase_ ) _UpperCamelCase : List[str] = outputs.argmax(dim=-1 ) _UpperCamelCase , _UpperCamelCase : Any = accelerator.gather_for_metrics((predictions, batch['label']) ) _UpperCamelCase : Optional[int] = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() _UpperCamelCase : Dict = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}: {1_0_0 * eval_metric:.2f}''' ) if args.with_tracking: accelerator.log( { 'accuracy': 1_0_0 * eval_metric, 'train_loss': total_loss.item() / len(UpperCAmelCase_ ), 'epoch': epoch, } , step=UpperCAmelCase_ , ) if checkpointing_steps == "epoch": _UpperCamelCase : Any = F'''epoch_{epoch}''' if args.output_dir is not None: _UpperCamelCase : str = os.path.join(args.output_dir , UpperCAmelCase_ ) accelerator.save_state(UpperCAmelCase_ ) if args.with_tracking: accelerator.end_training() def lowerCamelCase_ ( ) -> Tuple: '''simple docstring''' _UpperCamelCase : Union[str, Any] = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument('--data_dir' , required=UpperCAmelCase_ , help='The data folder on disk.' ) parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' ) parser.add_argument( '--mixed_precision' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) parser.add_argument( '--checkpointing_steps' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , ) parser.add_argument( '--output_dir' , type=UpperCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--resume_from_checkpoint' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='If the training should continue from a checkpoint folder.' , ) parser.add_argument( '--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , ) parser.add_argument( '--project_dir' , type=UpperCAmelCase_ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , ) _UpperCamelCase : str = parser.parse_args() _UpperCamelCase : Optional[int] = {'lr': 3e-2, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 6_4, 'image_size': 2_2_4} training_function(UpperCAmelCase_ , UpperCAmelCase_ ) if __name__ == "__main__": main()
648
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validate the inputs before running the DP.
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        # Minimum cost to cover all travel days from `index` to year's end.
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
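# Example: travel days [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 15]
# (1-day, 7-day, 30-day). One-day passes on days 1 and 20 plus a 7-day pass
# covering days 4-8 give the optimum of 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11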
648
1
import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder lowerCAmelCase__ = """__DUMMY_TRANSFORMERS_USER__""" lowerCAmelCase__ = """Dummy User""" lowerCAmelCase__ = """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""" lowerCAmelCase__ = """https://hub-ci.huggingface.co""" lowerCAmelCase__ = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" lowerCAmelCase__ = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" lowerCAmelCase__ = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Dict: '''simple docstring''' monkeypatch.setattr( 'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , UpperCAmelCase_ ) @pytest.fixture def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Optional[Any]: '''simple docstring''' monkeypatch.setattr('datasets.config.HF_ENDPOINT' , UpperCAmelCase_ ) monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , UpperCAmelCase_ ) @pytest.fixture def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] ) -> Optional[int]: '''simple docstring''' monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , UpperCAmelCase_ ) @pytest.fixture def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' HfFolder.save_token(UpperCAmelCase_ ) yield HfFolder.delete_token() @pytest.fixture(scope='session' ) def lowerCamelCase_ ( ) -> int: '''simple docstring''' return HfApi(endpoint=UpperCAmelCase_ ) @pytest.fixture(scope='session' ) def lowerCamelCase_ ( UpperCAmelCase_ : HfApi ) -> List[Any]: '''simple docstring''' _UpperCamelCase : str = HfFolder.get_token() HfFolder.save_token(UpperCAmelCase_ ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(UpperCAmelCase_ ) @pytest.fixture def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Any: '''simple docstring''' def _cleanup_repo(UpperCAmelCase_ : List[str] ): hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='dataset' ) return _cleanup_repo @pytest.fixture def lowerCamelCase_ ( UpperCAmelCase_ : List[str] ) -> int: '''simple docstring''' @contextmanager def _temporary_repo(UpperCAmelCase_ : Union[str, Any] ): try: yield repo_id finally: cleanup_repo(UpperCAmelCase_ ) return _temporary_repo @pytest.fixture(scope='session' ) def lowerCamelCase_ ( UpperCAmelCase_ : HfApi , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any ) -> str: '''simple docstring''' _UpperCamelCase : Union[str, Any] = F'''repo_txt_data-{int(time.time() * 1_0e3 )}''' _UpperCamelCase : Union[str, Any] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='dataset' , private=UpperCAmelCase_ ) hf_api.upload_file( token=UpperCAmelCase_ , path_or_fileobj=str(UpperCAmelCase_ ) , path_in_repo='data/text_data.txt' , repo_id=UpperCAmelCase_ , repo_type='dataset' , ) yield repo_id try: hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='dataset' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ) -> Union[str, Any]: '''simple docstring''' return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope='session' ) def lowerCamelCase_ ( UpperCAmelCase_ : HfApi , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] ) -> str: 
'''simple docstring''' _UpperCamelCase : Dict = F'''repo_zipped_txt_data-{int(time.time() * 1_0e3 )}''' _UpperCamelCase : str = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='dataset' , private=UpperCAmelCase_ ) hf_api.upload_file( token=UpperCAmelCase_ , path_or_fileobj=str(UpperCAmelCase_ ) , path_in_repo='data.zip' , repo_id=UpperCAmelCase_ , repo_type='dataset' , ) yield repo_id try: hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='dataset' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ) -> str: '''simple docstring''' return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope='session' ) def lowerCamelCase_ ( UpperCAmelCase_ : HfApi , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict ) -> int: '''simple docstring''' _UpperCamelCase : str = F'''repo_zipped_img_data-{int(time.time() * 1_0e3 )}''' _UpperCamelCase : int = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='dataset' , private=UpperCAmelCase_ ) hf_api.upload_file( token=UpperCAmelCase_ , path_or_fileobj=str(UpperCAmelCase_ ) , path_in_repo='data.zip' , repo_id=UpperCAmelCase_ , repo_type='dataset' , ) yield repo_id try: hf_api.delete_repo(UpperCAmelCase_ , token=UpperCAmelCase_ , repo_type='dataset' ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ) -> List[str]: '''simple docstring''' return hf_private_dataset_repo_zipped_img_data_
648
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ): _UpperCamelCase : List[Any] = parent _UpperCamelCase : Optional[Any] = batch_size _UpperCamelCase : int = seq_length _UpperCamelCase : str = is_training _UpperCamelCase : Tuple = use_input_mask _UpperCamelCase : Union[str, Any] = use_token_type_ids _UpperCamelCase : Union[str, Any] = use_labels _UpperCamelCase : Optional[Any] = vocab_size _UpperCamelCase : List[Any] = hidden_size _UpperCamelCase : Optional[Any] = embedding_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : str = num_attention_heads _UpperCamelCase : int = intermediate_size _UpperCamelCase : int = hidden_act _UpperCamelCase : Tuple = hidden_dropout_prob _UpperCamelCase : int = attention_probs_dropout_prob _UpperCamelCase : Tuple = max_position_embeddings _UpperCamelCase : List[str] = type_vocab_size _UpperCamelCase : Dict = type_sequence_label_size _UpperCamelCase : List[str] = initializer_range _UpperCamelCase : Optional[Any] = num_labels _UpperCamelCase : Tuple = num_choices _UpperCamelCase : List[str] = scope def A__ ( self): _UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase : Any = None if self.use_input_mask: _UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase : Optional[Any] = None if self.use_token_type_ids: _UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase : int = None _UpperCamelCase : List[str] = None _UpperCamelCase : Dict = None if self.use_labels: _UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , ) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case) _UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case) _UpperCamelCase : Optional[Any] = model(__snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[str] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def A__ ( self , 
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[int] = self.num_labels _UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Any = self.num_labels _UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : List[str] = self.num_choices _UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : Union[str, Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def A__ ( self): _UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) : Optional[int] = config_and_inputs _UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) a__ = True # test_resize_embeddings = False a__ = False def A__ ( self , __snake_case , __snake_case , __snake_case=False): _UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case) if return_labels: if model_class in get_values(__snake_case): _UpperCamelCase : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case) _UpperCamelCase : str = 
torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__snake_case) return inputs_dict def A__ ( self): _UpperCamelCase : Any = MegatronBertModelTester(self) _UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37) def A__ ( self): self.config_tester.run_common_tests() def A__ ( self): _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__snake_case) def A__ ( self): _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case) def A__ ( self): _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case) def A__ ( self): _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case) def A__ ( self): _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case) def A__ ( self): _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case) def A__ ( self): _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case) def A__ ( self): _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case) def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]: '''simple docstring''' return torch.tensor( UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , ) lowerCAmelCase__ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip('Model is not available.') def A__ ( self): _UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m' if "MYDIR" in os.environ: _UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case) _UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case) model.to(__snake_case) model.half() _UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]]) with torch.no_grad(): _UpperCamelCase : str = model(__snake_case)[0] _UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24)) self.assertEqual(output.shape , __snake_case) _UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8] for ii in range(3): for jj in range(3): _UpperCamelCase : Optional[Any] = output[0, ii, jj] _UpperCamelCase : Dict = expected[3 * ii + jj] _UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case) self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
648
1
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """
    Pure Python implementation of the P-Series algorithm.
    :return: the P-series from the first to the last (nth) term, as strings
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
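# Example: the first five terms of the P-series with power 2.
assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]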
648
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = """▁""" lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""} lowerCAmelCase__ = { """vocab_file""": { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model""" ), } } lowerCAmelCase__ = { """xlm-roberta-base""": 5_1_2, """xlm-roberta-large""": 5_1_2, """xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2, """xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2, """xlm-roberta-large-finetuned-conll03-english""": 5_1_2, """xlm-roberta-large-finetuned-conll03-german""": 5_1_2, } class lowercase ( _lowercase ): """simple docstring""" a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ["input_ids", "attention_mask"] def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ): # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token _UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , ) _UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(__snake_case)) _UpperCamelCase : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCamelCase : List[Any] = 1 _UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset _UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self): _UpperCamelCase : List[Any] = self.__dict__.copy() _UpperCamelCase : Optional[Any] = None _UpperCamelCase : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self , __snake_case): _UpperCamelCase : int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): _UpperCamelCase : Tuple = {} _UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def A__ ( self , __snake_case , __snake_case = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase : Tuple = [self.cls_token_id] _UpperCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self , __snake_case , __snake_case = None , __snake_case = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case) if token_ids_a is None: return [1] + ([0] * len(__snake_case)) + [1] return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1] def A__ ( self , __snake_case , __snake_case = None): _UpperCamelCase : Optional[Any] = [self.sep_token_id] _UpperCamelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def A__ ( self): return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token def A__ ( self): _UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def A__ ( self , __snake_case): return self.sp_model.encode(__snake_case , out_type=__snake_case) def A__ ( self , __snake_case): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCamelCase : str = self.sp_model.PieceToId(__snake_case) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A__ ( self , __snake_case): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def A__ ( self , __snake_case): _UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(__snake_case , ' ').strip() return out_string def A__ ( self , __snake_case , __snake_case = None): if not os.path.isdir(__snake_case): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return _UpperCamelCase : str = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __snake_case) elif not os.path.isfile(self.vocab_file): with open(__snake_case , 'wb') as fi: _UpperCamelCase : Any = self.sp_model.serialized_model_proto() fi.write(__snake_case) 
return (out_vocab_file,)
648
1
import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path lowerCAmelCase__ = [ {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""}, {"""dataset""": """snli""", """config_name""": """plain_text"""}, {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""}, {"""dataset""": """wiki40b""", """config_name""": """en"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""}, {"""dataset""": """natural_questions""", """config_name""": """default"""}, ] def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any]=True ) -> Any: '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_lowercase ) ) class lowercase ( _lowercase ): """simple docstring""" a__ = None a__ = None def A__ ( self , __snake_case , __snake_case): with TemporaryDirectory() as tmp_dir: _UpperCamelCase : List[Any] = dataset_module_factory(__snake_case , cache_dir=__snake_case) _UpperCamelCase : int = import_main_class(dataset_module.module_path , dataset=__snake_case) _UpperCamelCase : DatasetBuilder = builder_cls( cache_dir=__snake_case , config_name=__snake_case , hash=dataset_module.hash , ) _UpperCamelCase : Union[str, Any] = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=__snake_case).replace(os.sep , '/'), config.DATASET_INFO_FILENAME, ]) _UpperCamelCase : Tuple = cached_path(__snake_case , cache_dir=__snake_case) self.assertTrue(os.path.exists(__snake_case)) @pytest.mark.integration def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> List[str]: '''simple docstring''' _UpperCamelCase : Dict = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' _UpperCamelCase : Optional[Any] = dataset_module_factory('wikipedia' , cache_dir=UpperCAmelCase_ ) _UpperCamelCase : Tuple = import_main_class(dataset_module.module_path ) _UpperCamelCase : DatasetBuilder = builder_cls( cache_dir=UpperCAmelCase_ , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam _UpperCamelCase : Dict = None builder_instance.download_and_prepare() _UpperCamelCase : Optional[Any] = builder_instance.as_dataset() assert ds @pytest.mark.integration def lowerCamelCase_ ( UpperCAmelCase_ : Any ) -> Optional[int]: 
'''simple docstring''' _UpperCamelCase : Tuple = dataset_module_factory('wikipedia' , cache_dir=UpperCAmelCase_ ) _UpperCamelCase : Tuple = import_main_class(dataset_module.module_path , dataset=UpperCAmelCase_ ) _UpperCamelCase : DatasetBuilder = builder_cls( cache_dir=UpperCAmelCase_ , config_name='20220301.frr' , hash=dataset_module.hash , ) _UpperCamelCase : Union[str, Any] = builder_instance.as_streaming_dataset() assert ds assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) assert "train" in ds assert isinstance(ds['train'] , UpperCAmelCase_ ) assert next(iter(ds['train'] ) )
648
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
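# Hedged usage sketch, kept as comments (assumes the TvltImageProcessor and
# TvltFeatureExtractor classes named in the attributes above are importable;
# `video_frames` and `waveform` are illustrative inputs):
# from transformers import TvltImageProcessor, TvltFeatureExtractor
# processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
# inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
# `inputs` merges the pixel dict and the audio-feature dict built in __call__.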
648
1
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """
    Stochastic sampling from Karras et al. [1] tailored to variance-expanding (VE) models.

    [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models."
    """

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
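# Hedged usage sketch, kept as comments (the checkpoint id is a placeholder,
# not taken from this file):
# pipe = KarrasVePipeline.from_pretrained("<karras-ve-checkpoint>")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]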
648
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
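# Usage sketch: `attribute_map` above aliases max_position_embeddings to
# context_length, so either name reads the same value (the 2048 is illustrative).
config = RwkvConfig(context_length=2048)
assert config.max_position_embeddings == 2048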
648
1
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # nearest-neighbor 2x upsample, then a shape-preserving 3x3 conv
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the time embedding and broadcast it over the spatial dims
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
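# Hedged shape check for FlaxUpsample2D above (illustrative; the sizes are
# arbitrary and the check relies only on the jax/flax imports at the top):
_block = FlaxUpsample2D(out_channels=8)
_x = jnp.zeros((1, 16, 16, 8))  # NHWC layout, as the blocks above expect
_params = _block.init(jax.random.PRNGKey(0), _x)
assert _block.apply(_params, _x).shape == (1, 32, 32, 8)  # H and W doubled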
648
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""", """bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""", """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""", """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""", """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""", """bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""", """cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""", """cl-tohoku/bert-base-japanese-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json""" ), """wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""", # See all BERT models at https://huggingface.co/models?filter=bert } class lowercase ( _lowercase ): """simple docstring""" a__ = "bert" def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , 
**__snake_case , ): super().__init__(pad_token_id=__snake_case , **__snake_case) _UpperCamelCase : int = vocab_size _UpperCamelCase : Optional[Any] = hidden_size _UpperCamelCase : Optional[Any] = num_hidden_layers _UpperCamelCase : List[str] = num_attention_heads _UpperCamelCase : int = hidden_act _UpperCamelCase : Optional[Any] = intermediate_size _UpperCamelCase : Union[str, Any] = hidden_dropout_prob _UpperCamelCase : Tuple = attention_probs_dropout_prob _UpperCamelCase : Optional[int] = max_position_embeddings _UpperCamelCase : str = type_vocab_size _UpperCamelCase : Optional[Any] = initializer_range _UpperCamelCase : List[str] = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Any = use_cache _UpperCamelCase : Any = classifier_dropout class lowercase ( _lowercase ): """simple docstring""" @property def A__ ( self): if self.task == "multiple-choice": _UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ])
648
1
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
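# Illustrative check of the helpers above, using the de-obfuscated names from
# the rewrite (this snippet is not part of the dataset row). 3797 is a classic
# truncatable prime: every left and right truncation of it is itself prime.
assert is_prime(3797)
assert list_truncated_nums(3797) == [3797, 797, 379, 97, 37, 7, 3]
assert all(is_prime(n) for n in list_truncated_nums(3797))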
648
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase ( _lowercase ): """simple docstring""" a__ = "facebook/bart-large-mnli" a__ = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a__ = "text_classifier" a__ = AutoTokenizer a__ = AutoModelForSequenceClassification a__ = ["text", ["text"]] a__ = ["text"] def A__ ( self): super().setup() _UpperCamelCase : List[Any] = self.model.config _UpperCamelCase : Optional[int] = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('entail'): _UpperCamelCase : Tuple = int(__snake_case) if self.entailment_id == -1: raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.') def A__ ( self , __snake_case , __snake_case): _UpperCamelCase : List[Any] = labels return self.pre_processor( [text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , ) def A__ ( self , __snake_case): _UpperCamelCase : str = outputs.logits _UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
648
1
import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
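# Illustrative usage of the cipher above (names per the rewrite; not part of
# the dataset row). Atbash maps A<->Z, B<->Y, ..., so applying it twice is the
# identity, and both implementations must agree:
assert atbash("ABC") == "ZYX"
assert atbash_slow("Hello, World!") == atbash("Hello, World!")
assert atbash(atbash("with space")) == "with space"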
648
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
648
1
import unittest from accelerate import debug_launcher from accelerate.test_utils import require_cpu, test_ops, test_script @require_cpu class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self): debug_launcher(test_script.main) def A__ ( self): debug_launcher(test_ops.main)
648
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
648
1
from __future__ import annotations from math import pi def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : float , UpperCAmelCase_ : float ) -> dict[str, float]: '''simple docstring''' if (inductance, frequency, reactance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if inductance < 0: raise ValueError('Inductance cannot be negative' ) if frequency < 0: raise ValueError('Frequency cannot be negative' ) if reactance < 0: raise ValueError('Inductive reactance cannot be negative' ) if inductance == 0: return {"inductance": reactance / (2 * pi * frequency)} elif frequency == 0: return {"frequency": reactance / (2 * pi * inductance)} elif reactance == 0: return {"reactance": 2 * pi * frequency * inductance} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
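# The relation the sample above encodes is X_L = 2*pi*f*L; given any two of
# inductance, frequency, and reactance, the third follows. A standalone
# numeric check with made-up values (illustrative only, independent of the
# row's obfuscated names):
from math import pi

inductance = 0.035  # henries (assumed value)
frequency = 1000.0  # hertz (assumed value)
reactance = 2 * pi * frequency * inductance
print(f"X_L = {reactance:.2f} ohms")                      # X_L = 219.91 ohms
print(f"L   = {reactance / (2 * pi * frequency):.3f} H")  # recovers 0.035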
648
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp lowerCAmelCase__ = 5 lowerCAmelCase__ = 1_0 @require_sentencepiece @require_tokenizers class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = SpeechaTextTokenizer a__ = False a__ = True def A__ ( self): super().setUp() _UpperCamelCase : Any = sp.SentencePieceProcessor() spm_model.Load(__snake_case) _UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))] _UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case)))) _UpperCamelCase : Tuple = Path(self.tmpdirname) save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file']) _UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def A__ ( self): _UpperCamelCase : str = '<pad>' _UpperCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case) def A__ ( self): _UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(vocab_keys[-1] , 'j') self.assertEqual(len(__snake_case) , 10_01) def A__ ( self): self.assertEqual(self.get_tokenizer().vocab_size , 10_01) def A__ ( self): _UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) _UpperCamelCase : List[str] = tokenizer.tokenize('This is a test') self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , ) _UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( __snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case) self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8]) _UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case) self.assertListEqual( __snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def A__ ( self): # fmt: off _UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 
31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class lowercase ( unittest.TestCase ): """simple docstring""" a__ = "valhalla/s2t_mustc_multilinguial_medium" a__ = "C'est trop cool" a__ = "Esto es genial" @classmethod def A__ ( cls): _UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def A__ ( self): self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11) def A__ ( self): self.assertEqual(self.tokenizer.vocab_size , 1_00_00) def A__ ( self): self.assertIn(__snake_case , self.tokenizer.all_special_ids) _UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2] _UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case) _UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case) self.assertEqual(__snake_case , __snake_case) self.assertNotIn(self.tokenizer.eos_token , __snake_case) def A__ ( self): _UpperCamelCase : Any = 'fr' _UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , __snake_case) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def A__ ( self): _UpperCamelCase : Union[str, Any] = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) _UpperCamelCase : List[str] = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
648
1
import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = TextToVideoSDPipeline a__ = TEXT_TO_IMAGE_PARAMS a__ = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. a__ = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def A__ ( self): torch.manual_seed(0) _UpperCamelCase : List[str] = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , ) _UpperCamelCase : List[str] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__snake_case , set_alpha_to_one=__snake_case , ) torch.manual_seed(0) _UpperCamelCase : Dict = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0) _UpperCamelCase : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , ) _UpperCamelCase : Any = CLIPTextModel(__snake_case) _UpperCamelCase : List[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') _UpperCamelCase : Tuple = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def A__ ( self , __snake_case , __snake_case=0): if str(__snake_case).startswith('mps'): _UpperCamelCase : Optional[Any] = torch.manual_seed(__snake_case) else: _UpperCamelCase : Dict = torch.Generator(device=__snake_case).manual_seed(__snake_case) _UpperCamelCase : List[str] = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def A__ ( self): _UpperCamelCase : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator _UpperCamelCase : List[str] = self.get_dummy_components() _UpperCamelCase : Optional[int] = TextToVideoSDPipeline(**__snake_case) _UpperCamelCase : Any = sd_pipe.to(__snake_case) sd_pipe.set_progress_bar_config(disable=__snake_case) _UpperCamelCase : Any = self.get_dummy_inputs(__snake_case) _UpperCamelCase : Dict = 'np' _UpperCamelCase : List[Any] = sd_pipe(**__snake_case).frames _UpperCamelCase : Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) _UpperCamelCase : Dict = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0]) assert 
np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def A__ ( self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__snake_case , expected_max_diff=3e-3) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def A__ ( self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__snake_case , expected_max_diff=1e-2) @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.') def A__ ( self): pass @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.') def A__ ( self): pass @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.') def A__ ( self): pass def A__ ( self): return super().test_progress_bar() @slow @skip_mps class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self): _UpperCamelCase : List[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy') _UpperCamelCase : Any = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b') _UpperCamelCase : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) _UpperCamelCase : Optional[int] = pipe.to('cuda') _UpperCamelCase : List[str] = 'Spiderman is surfing' _UpperCamelCase : str = torch.Generator(device='cpu').manual_seed(0) _UpperCamelCase : Any = pipe(__snake_case , generator=__snake_case , num_inference_steps=25 , output_type='pt').frames _UpperCamelCase : Union[str, Any] = video_frames.cpu().numpy() assert np.abs(expected_video - video).mean() < 5e-2 def A__ ( self): _UpperCamelCase : List[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy') _UpperCamelCase : List[str] = TextToVideoSDPipeline.from_pretrained('damo-vilab/text-to-video-ms-1.7b') _UpperCamelCase : List[str] = pipe.to('cuda') _UpperCamelCase : List[Any] = 'Spiderman is surfing' _UpperCamelCase : Tuple = torch.Generator(device='cpu').manual_seed(0) _UpperCamelCase : int = pipe(__snake_case , generator=__snake_case , num_inference_steps=2 , output_type='pt').frames _UpperCamelCase : int = video_frames.cpu().numpy() assert np.abs(expected_video - video).mean() < 5e-2
648
import logging from transformers.configuration_utils import PretrainedConfig lowerCAmelCase__ = logging.getLogger(__name__) class lowercase ( _lowercase ): """simple docstring""" a__ = "masked_bert" def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ): super().__init__(pad_token_id=__snake_case , **__snake_case) _UpperCamelCase : List[Any] = vocab_size _UpperCamelCase : Union[str, Any] = hidden_size _UpperCamelCase : Optional[int] = num_hidden_layers _UpperCamelCase : Any = num_attention_heads _UpperCamelCase : int = hidden_act _UpperCamelCase : str = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : Any = attention_probs_dropout_prob _UpperCamelCase : Tuple = max_position_embeddings _UpperCamelCase : Dict = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : List[Any] = layer_norm_eps _UpperCamelCase : Tuple = pruning_method _UpperCamelCase : Tuple = mask_init _UpperCamelCase : Dict = mask_scale
648
1
def solution() -> str:
    """Return the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
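# The sample above builds the full (multi-thousand-digit) integer before
# slicing. An equivalent sketch that keeps only the last ten digits via
# modular exponentiation (illustrative, not the dataset row's code):
MOD = 10**10
last_ten = sum(pow(i, i, MOD) for i in range(1, 1001)) % MOD
print(str(last_ten).zfill(10))  # same ten digits as solution()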
648
import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow lowerCAmelCase__ = False class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self , __snake_case=32): set_seed(0) _UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3) _UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1) return model, optimizer @slow def A__ ( self): _UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable _UpperCamelCase : List[Any] = DDPMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) _UpperCamelCase : List[Any] = DDIMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0) _UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)] _UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)] _UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)] # train with a DDPM scheduler _UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample _UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM _UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample _UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5)) self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
648
1
import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = {"""vocab_file""": """spiece.model"""} lowerCAmelCase__ = { """vocab_file""": { """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""", """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""", } } lowerCAmelCase__ = { """xlnet-base-cased""": None, """xlnet-large-cased""": None, } # Segments (not really needed) lowerCAmelCase__ = 0 lowerCAmelCase__ = 1 lowerCAmelCase__ = 2 lowerCAmelCase__ = 3 lowerCAmelCase__ = 4 class lowercase ( _lowercase ): """simple docstring""" a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = "left" def __init__( self , __snake_case , __snake_case=False , __snake_case=True , __snake_case=False , __snake_case="<s>" , __snake_case="</s>" , __snake_case="<unk>" , __snake_case="<sep>" , __snake_case="<pad>" , __snake_case="<cls>" , __snake_case="<mask>" , __snake_case=["<eop>", "<eod>"] , __snake_case = None , **__snake_case , ): # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase : str = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token _UpperCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__snake_case , remove_space=__snake_case , keep_accents=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , pad_token=__snake_case , cls_token=__snake_case , mask_token=__snake_case , additional_special_tokens=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , ) _UpperCamelCase : Any = 3 _UpperCamelCase : List[Any] = do_lower_case _UpperCamelCase : int = remove_space _UpperCamelCase : Union[str, Any] = keep_accents _UpperCamelCase : List[str] = vocab_file _UpperCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(__snake_case) @property def A__ ( self): return len(self.sp_model) def A__ ( self): _UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self): _UpperCamelCase : Dict = self.__dict__.copy() _UpperCamelCase : str = None return state def __setstate__( self , __snake_case): _UpperCamelCase : str = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): _UpperCamelCase : Optional[Any] = {} _UpperCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def A__ ( self , __snake_case): if self.remove_space: _UpperCamelCase : Any = ' '.join(inputs.strip().split()) else: _UpperCamelCase : Optional[Any] = inputs _UpperCamelCase : str = outputs.replace('``' , '"').replace('\'\'' , '"') if not self.keep_accents: _UpperCamelCase : List[str] = unicodedata.normalize('NFKD' , __snake_case) _UpperCamelCase : Optional[int] = ''.join([c for c in outputs if not unicodedata.combining(__snake_case)]) if self.do_lower_case: _UpperCamelCase : Dict = outputs.lower() return outputs def A__ ( self , __snake_case): _UpperCamelCase : Any = 
self.preprocess_text(__snake_case) _UpperCamelCase : Dict = self.sp_model.encode(__snake_case , out_type=__snake_case) _UpperCamelCase : List[Any] = [] for piece in pieces: if len(__snake_case) > 1 and piece[-1] == str(',') and piece[-2].isdigit(): _UpperCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__snake_case , '')) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0]) == 1: _UpperCamelCase : int = cur_pieces[1:] else: _UpperCamelCase : Optional[Any] = cur_pieces[0][1:] cur_pieces.append(piece[-1]) new_pieces.extend(__snake_case) else: new_pieces.append(__snake_case) return new_pieces def A__ ( self , __snake_case): return self.sp_model.PieceToId(__snake_case) def A__ ( self , __snake_case): return self.sp_model.IdToPiece(__snake_case) def A__ ( self , __snake_case): _UpperCamelCase : Optional[Any] = ''.join(__snake_case).replace(__snake_case , ' ').strip() return out_string def A__ ( self , __snake_case , __snake_case = False , __snake_case = None , __snake_case = True , **__snake_case , ): _UpperCamelCase : List[Any] = kwargs.pop('use_source_tokenizer' , __snake_case) _UpperCamelCase : Union[str, Any] = self.convert_ids_to_tokens(__snake_case , skip_special_tokens=__snake_case) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 _UpperCamelCase : Optional[int] = [] _UpperCamelCase : Dict = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__snake_case)) _UpperCamelCase : List[Any] = [] sub_texts.append(__snake_case) else: current_sub_text.append(__snake_case) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(__snake_case)) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens _UpperCamelCase : Optional[Any] = ''.join(__snake_case) _UpperCamelCase : Any = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: _UpperCamelCase : int = self.clean_up_tokenization(__snake_case) return clean_text else: return text def A__ ( self , __snake_case , __snake_case = None): _UpperCamelCase : Optional[Any] = [self.sep_token_id] _UpperCamelCase : Optional[int] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def A__ ( self , __snake_case , __snake_case = None , __snake_case = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case) if token_ids_a is not None: return ([0] * len(__snake_case)) + [1] + ([0] * len(__snake_case)) + [1, 1] return ([0] * len(__snake_case)) + [1, 1] def A__ ( self , __snake_case , __snake_case = None): _UpperCamelCase : Optional[Any] = [self.sep_token_id] _UpperCamelCase : Any = [2] if token_ids_a is None: return len(token_ids_a + sep) * [0] + cls_segment_id return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id def A__ ( self , __snake_case , __snake_case = None): if not os.path.isdir(__snake_case): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return _UpperCamelCase : Tuple = os.path.join( 
__snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __snake_case) elif not os.path.isfile(self.vocab_file): with open(__snake_case , 'wb') as fi: _UpperCamelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(__snake_case) return (out_vocab_file,)
648
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowerCAmelCase__ = { """sample_size""": 3_2, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1_0_0_0, """block_out_channels""": [3_2, 6_4], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """sample_size""": 6_4, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1_0_0_0, """block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], """attention_head_dim""": 6_4, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """sample_size""": 2_5_6, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], """attention_head_dim""": 6_4, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """num_train_timesteps""": 4_0, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } lowerCAmelCase__ = { """num_train_timesteps""": 2_0_1, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } lowerCAmelCase__ = { """num_train_timesteps""": 1_5_1, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]: '''simple docstring''' if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str: '''simple docstring''' _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] _UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] _UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] _UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] _UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] _UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] _UpperCamelCase : Optional[Any] = 
checkpoint[F'''{old_prefix}.out_layers.3.weight'''] _UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: _UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int: '''simple docstring''' _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias'''] _UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Optional[Any] = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple: '''simple docstring''' _UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' ) _UpperCamelCase : Union[str, Any] = {} _UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight'] _UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias'] _UpperCamelCase : Dict = checkpoint['time_embed.2.weight'] _UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: _UpperCamelCase : List[str] = checkpoint['label_emb.weight'] _UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight'] _UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] _UpperCamelCase : Optional[int] = unet_config['down_block_types'] _UpperCamelCase : Optional[Any] = unet_config['layers_per_block'] _UpperCamelCase : Dict = unet_config['attention_head_dim'] _UpperCamelCase : List[str] = unet_config['block_out_channels'] _UpperCamelCase : str = 1 _UpperCamelCase : Optional[int] = channels_list[0] for i, layer_type in enumerate(UpperCAmelCase_ ): _UpperCamelCase : List[str] = channels_list[i] _UpperCamelCase : str = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(UpperCAmelCase_ ): _UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}''' _UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False _UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(UpperCAmelCase_ ): _UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}''' _UpperCamelCase : str = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False _UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ 
, UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) _UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}''' _UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1''' _UpperCamelCase : Dict = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0''' _UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 _UpperCamelCase : Tuple = current_channels # hardcoded the mid-block for now _UpperCamelCase : Any = 'mid_block.resnets.0' _UpperCamelCase : Optional[Any] = 'middle_block.0' _UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Optional[Any] = 'mid_block.attentions.0' _UpperCamelCase : Tuple = 'middle_block.1' _UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Tuple = 'mid_block.resnets.1' _UpperCamelCase : str = 'middle_block.2' _UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : List[Any] = 0 _UpperCamelCase : Optional[int] = unet_config['up_block_types'] for i, layer_type in enumerate(UpperCAmelCase_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}''' _UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0''' _UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0''' _UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1''' _UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}''' _UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0''' _UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) _UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}''' _UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1''' _UpperCamelCase : Optional[int] = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0''' _UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2''' _UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : List[Any] = checkpoint['out.0.weight'] _UpperCamelCase : str = checkpoint['out.0.bias'] _UpperCamelCase : int = checkpoint['out.2.weight'] _UpperCamelCase : List[Any] = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, 
help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = strabool(args.class_cond) lowerCAmelCase__ = os.path.basename(args.unet_path) print(f'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: lowerCAmelCase__ = TEST_UNET_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: lowerCAmelCase__ = None lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config) lowerCAmelCase__ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: lowerCAmelCase__ = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config) lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
648
1
# Logistic Regression from scratch

# In[62]:

# In[63]:

# importing all the required libraries

import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        # predicting the value of probability from the logistic regression algorithm
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
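# Quick sanity checks for the helpers above (names follow the de-obfuscated
# rewrite; illustrative only, not part of the dataset row):
import numpy as np

# the sigmoid is 0.5 at the origin and saturates toward 0/1
assert sigmoid_function(0) == 0.5
assert sigmoid_function(35.0) > 0.999

# the cross-entropy cost is small when predictions match the labels
h = np.array([0.99, 0.01])
y = np.array([1.0, 0.0])
assert cost_function(h, y) < 0.02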
648
def heaps(arr: list) -> list:
    """Iterative Heap's algorithm: return all permutations of a list."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
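# Illustrative usage of Heap's algorithm above (names per the rewrite; not
# part of the dataset row): three elements yield all 3! = 6 permutations.
perms = heaps([1, 2, 3])
assert len(perms) == 6
assert set(perms) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}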
648
1
from ...processing_utils import ProcessorMixin class lowercase ( _lowercase ): """simple docstring""" a__ = ["image_processor", "feature_extractor"] a__ = "TvltImageProcessor" a__ = "TvltFeatureExtractor" def __init__( self , __snake_case , __snake_case): super().__init__(image_processor=__snake_case , feature_extractor=__snake_case) _UpperCamelCase : List[str] = image_processor _UpperCamelCase : Dict = feature_extractor def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ): if images is None and audio is None: raise ValueError('You need to specify either an `images` or `audio` input to process.') _UpperCamelCase : Union[str, Any] = None if images is not None: _UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case) if images_mixed is not None: _UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case) if audio is not None: _UpperCamelCase : Tuple = self.feature_extractor( __snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case) _UpperCamelCase : Tuple = {} if audio is not None: output_dict.update(__snake_case) if images is not None: output_dict.update(__snake_case) if images_mixed_dict is not None: output_dict.update(__snake_case) return output_dict @property def A__ ( self): _UpperCamelCase : List[Any] = self.image_processor.model_input_names _UpperCamelCase : List[Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/rename a ParlAI Blenderbot checkpoint into the Hugging Face layout."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
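# Once converted, the dump directory is an ordinary Hugging Face checkpoint and can
# be loaded back directly. A minimal sketch (the path is the default --save_dir
# above):
from transformers import BlenderbotForConditionalGeneration

blenderbot = BlenderbotForConditionalGeneration.from_pretrained("hf_blenderbot")
print(sum(p.numel() for p in blenderbot.parameters()))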
import argparse from pathlib import Path import requests import torch from PIL import Image from transformers import ( RobertaTokenizer, TrOCRConfig, TrOCRForCausalLM, TrOCRProcessor, VisionEncoderDecoderModel, ViTConfig, ViTImageProcessor, ViTModel, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def lowerCamelCase_ ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple ) -> str: '''simple docstring''' _UpperCamelCase : Union[str, Any] = [] for i in range(encoder_config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append( (F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') ) # cls token, position embeddings and patch embeddings of encoder rename_keys.extend( [ ('encoder.deit.cls_token', 'encoder.embeddings.cls_token'), ('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'), ('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'), ('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'), ('encoder.deit.norm.weight', 'encoder.layernorm.weight'), ('encoder.deit.norm.bias', 'encoder.layernorm.bias'), ] ) return rename_keys def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple: '''simple docstring''' for i in range(encoder_config.num_hidden_layers ): # queries, keys and values (only weights, no biases) _UpperCamelCase : Dict = state_dict.pop(F'''encoder.deit.blocks.{i}.attn.qkv.weight''' ) _UpperCamelCase : List[Any] = in_proj_weight[ : encoder_config.hidden_size, : ] _UpperCamelCase : Optional[Any] = in_proj_weight[ encoder_config.hidden_size : encoder_config.hidden_size * 2, : ] _UpperCamelCase : Union[str, Any] = in_proj_weight[ -encoder_config.hidden_size :, : ] def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase : Optional[Any] = dct.pop(UpperCAmelCase_ ) _UpperCamelCase : int = val def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> str: '''simple docstring''' if "handwritten" in checkpoint_url: _UpperCamelCase : str = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have # url = 
"https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" # # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg" elif "printed" in checkpoint_url or "stage1" in checkpoint_url: _UpperCamelCase : List[Any] = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg' _UpperCamelCase : Optional[Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ).convert('RGB' ) return im @torch.no_grad() def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ) -> Optional[int]: '''simple docstring''' _UpperCamelCase : Tuple = ViTConfig(image_size=3_8_4 , qkv_bias=UpperCAmelCase_ ) _UpperCamelCase : Optional[int] = TrOCRConfig() # size of the architecture if "base" in checkpoint_url: _UpperCamelCase : Optional[Any] = 7_6_8 elif "large" in checkpoint_url: # use ViT-large encoder _UpperCamelCase : Any = 1_0_2_4 _UpperCamelCase : Union[str, Any] = 4_0_9_6 _UpperCamelCase : int = 2_4 _UpperCamelCase : List[str] = 1_6 _UpperCamelCase : Optional[Any] = 1_0_2_4 else: raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' ) # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards if "large-printed" in checkpoint_url or "stage1" in checkpoint_url: _UpperCamelCase : Optional[Any] = False _UpperCamelCase : List[Any] = 'relu' _UpperCamelCase : List[Any] = 1_0_2_4 _UpperCamelCase : str = True _UpperCamelCase : Dict = False _UpperCamelCase : Any = False # load HuggingFace model _UpperCamelCase : str = ViTModel(UpperCAmelCase_ , add_pooling_layer=UpperCAmelCase_ ) _UpperCamelCase : Union[str, Any] = TrOCRForCausalLM(UpperCAmelCase_ ) _UpperCamelCase : Dict = VisionEncoderDecoderModel(encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ ) model.eval() # load state_dict of original model, rename some keys _UpperCamelCase : int = torch.hub.load_state_dict_from_url(UpperCAmelCase_ , map_location='cpu' , check_hash=UpperCAmelCase_ )['model'] _UpperCamelCase : List[Any] = create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ ) for src, dest in rename_keys: rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ ) # remove parameters we don't need del state_dict["encoder.deit.head.weight"] del state_dict["encoder.deit.head.bias"] del state_dict["decoder.version"] # add prefix to decoder keys for key, val in state_dict.copy().items(): _UpperCamelCase : List[Any] = state_dict.pop(UpperCAmelCase_ ) if key.startswith('decoder' ) and "output_projection" not in key: _UpperCamelCase : List[str] = val else: _UpperCamelCase : Optional[Any] = val # load state dict model.load_state_dict(UpperCAmelCase_ ) # Check outputs on an image _UpperCamelCase : List[Any] = ViTImageProcessor(size=encoder_config.image_size ) _UpperCamelCase : Optional[Any] = RobertaTokenizer.from_pretrained('roberta-large' ) _UpperCamelCase : Optional[Any] = TrOCRProcessor(UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Dict = processor(images=prepare_img(UpperCAmelCase_ ) , return_tensors='pt' ).pixel_values # verify logits _UpperCamelCase : int = torch.tensor([[model.config.decoder.decoder_start_token_id]] ) _UpperCamelCase : Tuple = model(pixel_values=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_ ) _UpperCamelCase : Any = outputs.logits _UpperCamelCase : Optional[Any] = torch.Size([1, 1, 5_0_2_6_5] ) if 
"trocr-base-handwritten" in checkpoint_url: _UpperCamelCase : Any = torch.tensor( [-1.4_5_0_2, -4.6_6_8_3, -0.5_3_4_7, -2.9_2_9_1, 9.1_4_3_5, -3.0_5_7_1, 8.9_7_6_4, 1.7_5_6_0, 8.7_3_5_8, -1.5_3_1_1] ) elif "trocr-large-handwritten" in checkpoint_url: _UpperCamelCase : Dict = torch.tensor( [-2.6_4_3_7, -1.3_1_2_9, -2.2_5_9_6, -5.3_4_5_5, 6.3_5_3_9, 1.7_6_0_4, 5.4_9_9_1, 1.4_7_0_2, 5.6_1_1_3, 2.0_1_7_0] ) elif "trocr-base-printed" in checkpoint_url: _UpperCamelCase : List[str] = torch.tensor( [-5.6_8_1_6, -5.8_3_8_8, 1.1_3_9_8, -6.9_0_3_4, 6.8_5_0_5, -2.4_3_9_3, 1.2_2_8_4, -1.0_2_3_2, -1.9_6_6_1, -3.9_2_1_0] ) elif "trocr-large-printed" in checkpoint_url: _UpperCamelCase : Optional[Any] = torch.tensor( [-6.0_1_6_2, -7.0_9_5_9, 4.4_1_5_5, -5.1_0_6_3, 7.0_4_6_8, -3.1_6_3_1, 2.6_4_6_6, -0.3_0_8_1, -0.8_1_0_6, -1.7_5_3_5] ) if "stage1" not in checkpoint_url: assert logits.shape == expected_shape, "Shape of logits not as expected" assert torch.allclose(logits[0, 0, :1_0] , UpperCAmelCase_ , atol=1e-3 ), "First elements of logits not as expected" Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase_ ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_url""", default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""", type=str, help="""URL to the original PyTorch checkpoint (.pth file).""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) lowerCAmelCase__ = parser.parse_args() convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_tensorflow_text_available():
    from transformers.models.bert import TFBertTokenizer


TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"

if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]


@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)

            # We may see small differences because the loaded model is compiled,
            # so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
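# The point of the in-graph tokenizer these tests exercise, in miniature: strings
# go straight into a Keras model with no Python-side preprocessing, so the whole
# pipeline can be exported as one SavedModel. A hedged sketch reusing the tiny
# checkpoint from above:
import tensorflow as tf
from transformers import TFAutoModel, TFBertTokenizer

tiny_tokenizer = TFBertTokenizer.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
tiny_model = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
sentences = tf.constant(["This is a straightforward English test sentence."])
outputs = tiny_model(**tiny_tokenizer(sentences))
print(outputs.pooler_output.shape)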
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
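# After conversion, the output directory behaves like any other causal-LM
# checkpoint. A hedged sketch of loading it for generation (the directory name is
# a placeholder):
from transformers import AutoModelForCausalLM, AutoTokenizer

rwkv_tokenizer = AutoTokenizer.from_pretrained("rwkv-converted")  # placeholder path
rwkv_model = AutoModelForCausalLM.from_pretrained("rwkv-converted")
prompt = rwkv_tokenizer("In a shocking finding,", return_tensors="pt")
print(rwkv_tokenizer.decode(rwkv_model.generate(**prompt, max_new_tokens=20)[0]))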
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
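# The `_LazyModule` indirection above keeps `import transformers.models.canine`
# cheap: torch is only pulled in when a modeling symbol is first accessed. A toy
# re-implementation of the idea via PEP 562 module-level __getattr__ (illustrative
# only, not the actual transformers mechanism):
import importlib

_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")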
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
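# A hedged sketch of what a concrete subclass looks like (the class below is a
# simplified stand-in, not one of the real io modules): the constructor just
# records the options, and `read()` is where the dataset actually gets built.
class ToyTextReader(AbstractDatasetReader):
    def read(self):
        # Real readers construct a packaged builder here and either stream it or
        # download-and-prepare it into self.cache_dir; this stand-in shows only
        # the contract.
        from .. import load_dataset  # illustrative import; real readers use builders directly

        return load_dataset(
            "text", data_files=self.path_or_paths, split=self.split, streaming=self.streaming
        )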
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ): _UpperCamelCase : List[Any] = parent _UpperCamelCase : Dict = batch_size _UpperCamelCase : Optional[int] = image_size _UpperCamelCase : str = num_channels _UpperCamelCase : Optional[Any] = embeddings_size _UpperCamelCase : Tuple = hidden_sizes _UpperCamelCase : Dict = depths _UpperCamelCase : str = is_training _UpperCamelCase : Optional[int] = use_labels _UpperCamelCase : str = hidden_act _UpperCamelCase : Optional[int] = num_labels _UpperCamelCase : Optional[int] = scope _UpperCamelCase : Tuple = len(__snake_case) _UpperCamelCase : Dict = out_features _UpperCamelCase : Union[str, Any] = out_indices _UpperCamelCase : int = num_groups def A__ ( self): _UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCamelCase : str = None if self.use_labels: _UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels) _UpperCamelCase : str = self.get_config() return config, pixel_values, labels def A__ ( self): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : str = BitModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[Any] = model(__snake_case) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Dict = self.num_labels _UpperCamelCase : Dict = BitForImageClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Dict = model(__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , 
[self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None _UpperCamelCase : Any = None _UpperCamelCase : str = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Any = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def A__ ( self): _UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs _UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def A__ ( self): _UpperCamelCase : Dict = BitModelTester(self) _UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case) def A__ ( self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self): return @unittest.skip(reason='Bit does not output attentions') def A__ ( self): pass @unittest.skip(reason='Bit does not use inputs_embeds') def A__ ( self): pass @unittest.skip(reason='Bit does not support input and output embeddings') def A__ ( self): pass def A__ ( self): _UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : int = model_class(__snake_case) _UpperCamelCase : List[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : Optional[int] = [*signature.parameters.keys()] _UpperCamelCase : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __snake_case) def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case) def A__ ( self): _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__snake_case) def A__ ( self): _UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : Union[str, Any] = model_class(config=__snake_case) for name, module in model.named_modules(): if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) 
self.assertTrue( torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def A__ ( self): def check_hidden_states_output(__snake_case , __snake_case , __snake_case): _UpperCamelCase : str = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): _UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case)) _UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase : str = self.model_tester.num_stages self.assertEqual(len(__snake_case) , expected_num_stages + 1) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : List[str] = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _UpperCamelCase : Any = layer_type _UpperCamelCase : Tuple = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase : List[str] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) @unittest.skip(reason='Bit does not use feedforward chunking') def A__ ( self): pass def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case) @slow def A__ ( self): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case) self.assertIsNotNone(__snake_case) def lowerCamelCase_ ( ) -> Optional[int]: '''simple docstring''' _UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def A__ ( self): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def A__ ( self): _UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case) _UpperCamelCase : str = self.default_image_processor _UpperCamelCase : List[str] = prepare_img() _UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case) # forward pass with torch.no_grad(): _UpperCamelCase : Any = model(**__snake_case) # verify the logits _UpperCamelCase : Dict = torch.Size((1, 10_00)) self.assertEqual(outputs.logits.shape , __snake_case) _UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4)) @require_torch class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def A__ ( self): _UpperCamelCase : List[str] = BitModelTester(self)
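# The integration test above, distilled into a plain inference snippet. Hedged: it
# assumes BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] resolves to "google/bit-50" and
# reuses the same COCO fixture image as the test suite.
import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

bit_processor = BitImageProcessor.from_pretrained("google/bit-50")
bit_model = BitForImageClassification.from_pretrained("google/bit-50")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
bit_inputs = bit_processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = bit_model(**bit_inputs).logits
print(bit_model.config.id2label[logits.argmax(-1).item()])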
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. lowerCAmelCase__ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class lowercase ( unittest.TestCase ): """simple docstring""" a__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING a__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: a__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: a__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = ZeroShotClassificationPipeline( model=__snake_case , tokenizer=__snake_case , candidate_labels=['polics', 'health']) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def A__ ( self , __snake_case , __snake_case): _UpperCamelCase : Tuple = classifier('Who are you voting for in 2020?' , candidate_labels='politics') self.assertEqual(__snake_case , {'sequence': ANY(__snake_case), 'labels': [ANY(__snake_case)], 'scores': [ANY(__snake_case)]}) # No kwarg _UpperCamelCase : Tuple = classifier('Who are you voting for in 2020?' , ['politics']) self.assertEqual(__snake_case , {'sequence': ANY(__snake_case), 'labels': [ANY(__snake_case)], 'scores': [ANY(__snake_case)]}) _UpperCamelCase : str = classifier('Who are you voting for in 2020?' , candidate_labels=['politics']) self.assertEqual(__snake_case , {'sequence': ANY(__snake_case), 'labels': [ANY(__snake_case)], 'scores': [ANY(__snake_case)]}) _UpperCamelCase : Optional[Any] = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health') self.assertEqual( __snake_case , {'sequence': ANY(__snake_case), 'labels': [ANY(__snake_case), ANY(__snake_case)], 'scores': [ANY(__snake_case), ANY(__snake_case)]}) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])) , 1.0) _UpperCamelCase : int = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health']) self.assertEqual( __snake_case , {'sequence': ANY(__snake_case), 'labels': [ANY(__snake_case), ANY(__snake_case)], 'scores': [ANY(__snake_case), ANY(__snake_case)]}) self.assertAlmostEqual(sum(nested_simplify(outputs['scores'])) , 1.0) _UpperCamelCase : str = classifier( 'Who are you voting for in 2020?' 
, candidate_labels='politics' , hypothesis_template='This text is about {}') self.assertEqual(__snake_case , {'sequence': ANY(__snake_case), 'labels': [ANY(__snake_case)], 'scores': [ANY(__snake_case)]}) # https://github.com/huggingface/transformers/issues/13846 _UpperCamelCase : Tuple = classifier(['I am happy'] , ['positive', 'negative']) self.assertEqual( __snake_case , [ {'sequence': ANY(__snake_case), 'labels': [ANY(__snake_case), ANY(__snake_case)], 'scores': [ANY(__snake_case), ANY(__snake_case)]} for i in range(1) ] , ) _UpperCamelCase : Optional[int] = classifier(['I am happy', 'I am sad'] , ['positive', 'negative']) self.assertEqual( __snake_case , [ {'sequence': ANY(__snake_case), 'labels': [ANY(__snake_case), ANY(__snake_case)], 'scores': [ANY(__snake_case), ANY(__snake_case)]} for i in range(2) ] , ) with self.assertRaises(__snake_case): classifier('' , candidate_labels='politics') with self.assertRaises(__snake_case): classifier(__snake_case , candidate_labels='politics') with self.assertRaises(__snake_case): classifier('Who are you voting for in 2020?' , candidate_labels='') with self.assertRaises(__snake_case): classifier('Who are you voting for in 2020?' , candidate_labels=__snake_case) with self.assertRaises(__snake_case): classifier( 'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , ) with self.assertRaises(__snake_case): classifier( 'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=__snake_case , ) self.run_entailment_id(__snake_case) def A__ ( self , __snake_case): _UpperCamelCase : str = zero_shot_classifier.model.config _UpperCamelCase : List[str] = config.labelaid _UpperCamelCase : Optional[Any] = zero_shot_classifier.entailment_id _UpperCamelCase : str = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2} self.assertEqual(zero_shot_classifier.entailment_id , -1) _UpperCamelCase : Optional[Any] = {'entailment': 0, 'neutral': 1, 'contradiction': 2} self.assertEqual(zero_shot_classifier.entailment_id , 0) _UpperCamelCase : Tuple = {'ENTAIL': 0, 'NON-ENTAIL': 1} self.assertEqual(zero_shot_classifier.entailment_id , 0) _UpperCamelCase : List[Any] = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0} self.assertEqual(zero_shot_classifier.entailment_id , 2) _UpperCamelCase : List[str] = original_labelaid self.assertEqual(__snake_case , zero_shot_classifier.entailment_id) @require_torch def A__ ( self): _UpperCamelCase : Tuple = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( 'Who are you voting for in 2020?' * 1_00 , candidate_labels=['politics', 'public health', 'science']) @require_torch def A__ ( self): _UpperCamelCase : str = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , ) _UpperCamelCase : Optional[int] = zero_shot_classifier( 'Who are you voting for in 2020?' 
, candidate_labels=['politics', 'public health', 'science']) self.assertEqual( nested_simplify(__snake_case) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def A__ ( self): _UpperCamelCase : str = pipeline( 'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , ) _UpperCamelCase : List[str] = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science']) self.assertEqual( nested_simplify(__snake_case) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['science', 'public health', 'politics'], 'scores': [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def A__ ( self): _UpperCamelCase : Dict = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt') _UpperCamelCase : Union[str, Any] = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science']) self.assertEqual( nested_simplify(__snake_case) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) _UpperCamelCase : Dict = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=__snake_case , ) self.assertEqual( nested_simplify(__snake_case) , { 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. 
On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def A__ ( self): _UpperCamelCase : str = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf') _UpperCamelCase : Dict = zero_shot_classifier( 'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science']) self.assertEqual( nested_simplify(__snake_case) , { 'sequence': 'Who are you voting for in 2020?', 'labels': ['politics', 'public health', 'science'], 'scores': [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) _UpperCamelCase : List[str] = zero_shot_classifier( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks' ' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder' ' through an attention mechanism. We propose a new simple network architecture, the Transformer, based' ' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two' ' machine translation tasks show these models to be superior in quality while being more parallelizable' ' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014' ' English-to-German translation task, improving over the existing best results, including ensembles by' ' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new' ' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small' ' fraction of the training costs of the best models from the literature. We show that the Transformer' ' generalizes well to other tasks by applying it successfully to English constituency parsing both with' ' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=__snake_case , ) self.assertEqual( nested_simplify(__snake_case) , { 'sequence': ( 'The dominant sequence transduction models are based on complex recurrent or convolutional neural' ' networks in an encoder-decoder configuration. The best performing models also connect the' ' encoder and decoder through an attention mechanism. We propose a new simple network' ' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence' ' and convolutions entirely. Experiments on two machine translation tasks show these models to be' ' superior in quality while being more parallelizable and requiring significantly less time to' ' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,' ' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014' ' English-to-French translation task, our model establishes a new single-model state-of-the-art' ' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training' ' costs of the best models from the literature. 
We show that the Transformer generalizes well to' ' other tasks by applying it successfully to English constituency parsing both with large and' ' limited training data.' ), 'labels': ['translation', 'machine learning', 'vision', 'statistics'], 'scores': [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
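# The behaviour these tests pin down, as a user would invoke it (scores in the
# trailing comment are illustrative, not exact):
from transformers import pipeline

zero_shot = pipeline("zero-shot-classification", model="roberta-large-mnli")
result = zero_shot(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(result["labels"][0], result["scores"][0])  # e.g. "politics" with a score near 0.98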
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration step `steps` times to the list of vectors."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every line segment by four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
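# A cheap invariant check (not in the original file): every iteration replaces
# each segment with four, so after k steps the polyline built from the initial
# triangle (3 segments) has 3 * 4**k + 1 points.
for k in range(4):
    assert len(iterate(INITIAL_VECTORS, k)) == 3 * 4**k + 1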
from __future__ import annotations

import requests

valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via Reddit's public JSON endpoint."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
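# A second hedged invocation: the five top posts with only the fields we care
# about (both names are members of `valid_terms` above, and "top" is one of
# Reddit's standard listing endpoints):
top_posts = get_subreddit_data("learnpython", limit=5, age="top", wanted_data=["title", "score"])
for post in top_posts.values():
    print(post["score"], post["title"])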
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
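# What the custom IGNORE_RESULT flag buys in practice: a doctest can assert that a
# call succeeds without pinning down noisy or nondeterministic output. The function
# below is hypothetical, purely for illustration:
def load_checkpoint(path):
    """
    >>> load_checkpoint("tiny.bin")  # doctest: +IGNORE_RESULT
    <repr we do not want to maintain in the test>
    """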
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple

import numpy as np

from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy


if is_flax_available():
    import jax.numpy as jnp


class cached_property(property):
    """Descriptor that mimics @property but caches the result on the instance."""

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached


def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")


def is_tensor(x):
    """Tests if `x` is a torch, TensorFlow or Jax tensor, or a NumPy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True
    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if `x` is a NumPy array."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if `x` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if `x` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if `x` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if `x` is a TensorFlow tensor. Safe to call even if TensorFlow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if `x` is a TF symbolic tensor (i.e. not eager). Safe to call without TensorFlow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if `x` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TF tensor, torch tensor, NumPy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TF tensor, torch tensor, NumPy array or python list to a NumPy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj


class ModelOutput(OrderedDict):
    """Base class for all model outputs as dataclass; behaves like a dict and a tuple."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyException if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self) -> Tuple[Any]:
        """Convert self to a tuple containing all attributes/keys that are not `None`."""
        return tuple(self[k] for k in self.keys())


class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper around `contextlib.ExitStack` that enters a collection of context managers."""

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)


def can_return_loss(model_class):
    """Check whether a given model class can return a loss."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single-level dict with delimiter-joined keys."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir


def transpose(array, axes=None):
    """Framework-agnostic version of `numpy.transpose` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of `numpy.reshape` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of `numpy.squeeze` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of `numpy.expand_dims` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of `numpy.size` for NumPy/torch/TF/Jax inputs."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Prefix every entry of an auto map with the repo id it comes from."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"
    return auto_map


def infer_framework(model_class):
    """Infer the framework ("tf", "pt" or "flax") a model class belongs to from its MRO."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
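# Added demo (not part of the original module): exercises only the NumPy branch
# of the framework-agnostic helpers above, since NumPy is the one backend these
# utilities require unconditionally; torch/TF/Jax inputs dispatch the same way.
if __name__ == "__main__":
    x = np.arange(6).reshape(2, 3)
    assert transpose(x).shape == (3, 2)
    assert reshape(x, (3, 2)).shape == (3, 2)
    assert squeeze(np.ones((1, 4))).shape == (4,)
    assert expand_dims(x, 0).shape == (1, 2, 3)
    assert tensor_size(x) == 6
    assert flatten_dict({"a": {"b": 1}}) == {"a.b": 1}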
"""
Project Euler 551, sum of digits sequence: a(1) = 1 and
a(n) = a(n - 1) + digitsum(a(n - 1)). solution(n) returns a(n) for very large
n by caching "jumps" keyed on the digit sums of the head and tail of the
current term; digits are stored little-endian (digits[j] is the 10**j digit).
"""

ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[tuple[int, int, int]]]] = {}


def next_term(a_i, k, i, n):
    """Advance a_i (in place) as far as possible using cached jumps; returns (diff, terms skipped)."""
    # ds_b: digit sum of b and c: value of c in the decomposition a(i) = b * 10^k + c
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(kk, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, kk, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """Compute sequential terms until the n-th term or until the low k digits overflow."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Add `addend` into the little-endian digit array, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the n-th term of the sequence."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
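# Hedged cross-check (added illustration, not part of the original solution):
# a brute-force version of a(1) = 1, a(n+1) = a(n) + digitsum(a(n)), usable
# only for small n; for such n it should agree with the jump-cached solver
# above, e.g. naive_solution(10) == solution(10).
def naive_solution(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a


assert naive_solution(1) == 1 and naive_solution(3) == 4  # 1 -> 2 -> 4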
import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowercase : """simple docstring""" @staticmethod def A__ ( *__snake_case , **__snake_case): pass @is_pipeline_test @require_vision @require_torch class lowercase ( unittest.TestCase ): """simple docstring""" a__ = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection') _UpperCamelCase : List[Any] = [ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] return object_detector, examples def A__ ( self , __snake_case , __snake_case): _UpperCamelCase : int = object_detector(examples[0] , threshold=0.0) _UpperCamelCase : Tuple = len(__snake_case) self.assertGreater(__snake_case , 0) self.assertEqual( __snake_case , [ { 'score': ANY(__snake_case), 'label': ANY(__snake_case), 'box': {'xmin': ANY(__snake_case), 'ymin': ANY(__snake_case), 'xmax': ANY(__snake_case), 'ymax': ANY(__snake_case)}, } for i in range(__snake_case) ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF') def A__ ( self): pass @require_torch def A__ ( self): _UpperCamelCase : str = pipeline( 'zero-shot-object-detection' , model='hf-internal-testing/tiny-random-owlvit-object-detection') _UpperCamelCase : Dict = object_detector( './tests/fixtures/tests_samples/COCO/000000039769.png' , candidate_labels=['cat', 'remote', 'couch'] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(__snake_case , decimals=4) , [ {'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}}, {'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}}, {'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}}, {'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}}, {'score': 0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}}, {'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}}, {'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}}, {'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}}, {'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}}, ] , ) _UpperCamelCase : Union[str, Any] = object_detector( [ { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'candidate_labels': ['cat', 'remote', 'couch'], } ] , threshold=0.6_4 , ) self.assertEqual( nested_simplify(__snake_case , decimals=4) , [ [ {'score': 0.7_2_3_5, 'label': 'cat', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}}, {'score': 0.7_2_1_8, 'label': 'remote', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}}, {'score': 0.7_1_8_4, 'label': 'couch', 'box': {'xmin': 2_04, 'ymin': 1_67, 'xmax': 2_32, 'ymax': 1_90}}, {'score': 0.6_7_4_8, 'label': 'remote', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}}, {'score': 
0.6_6_5_6, 'label': 'cat', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}}, {'score': 0.6_6_1_4, 'label': 'couch', 'box': {'xmin': 5_71, 'ymin': 83, 'xmax': 5_98, 'ymax': 1_03}}, {'score': 0.6_4_5_6, 'label': 'remote', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}}, {'score': 0.6_4_2, 'label': 'remote', 'box': {'xmin': 67, 'ymin': 2_74, 'xmax': 93, 'ymax': 2_97}}, {'score': 0.6_4_1_9, 'label': 'cat', 'box': {'xmin': 4_94, 'ymin': 1_05, 'xmax': 5_21, 'ymax': 1_27}}, ] ] , ) @require_torch @slow def A__ ( self): _UpperCamelCase : Any = pipeline('zero-shot-object-detection') _UpperCamelCase : List[str] = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , ) self.assertEqual( nested_simplify(__snake_case , decimals=4) , [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}}, {'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}}, {'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}}, {'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}}, ] , ) _UpperCamelCase : int = object_detector( [ { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, { 'image': 'http://images.cocodataset.org/val2017/000000039769.jpg', 'candidate_labels': ['cat', 'remote', 'couch'], }, ] , ) self.assertEqual( nested_simplify(__snake_case , decimals=4) , [ [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}}, {'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}}, {'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}}, {'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}}, ], [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}}, {'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}}, {'score': 0.1_4_7_4, 'label': 'remote', 'box': {'xmin': 3_35, 'ymin': 74, 'xmax': 3_71, 'ymax': 1_87}}, {'score': 0.1_2_0_8, 'label': 'couch', 'box': {'xmin': 4, 'ymin': 0, 'xmax': 6_42, 'ymax': 4_76}}, ], ] , ) @require_tf @unittest.skip('Zero Shot Object Detection not implemented in TF') def A__ ( self): pass @require_torch @slow def A__ ( self): _UpperCamelCase : List[str] = 0.2 _UpperCamelCase : Dict = pipeline('zero-shot-object-detection') _UpperCamelCase : List[Any] = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , threshold=__snake_case , ) self.assertEqual( nested_simplify(__snake_case , decimals=4) , [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}}, {'score': 0.2_5_3_7, 'label': 'cat', 'box': {'xmin': 1, 'ymin': 55, 'xmax': 3_15, 'ymax': 4_72}}, ] , ) @require_torch @slow def A__ ( self): _UpperCamelCase : 
List[str] = 2 _UpperCamelCase : List[str] = pipeline('zero-shot-object-detection') _UpperCamelCase : Any = object_detector( 'http://images.cocodataset.org/val2017/000000039769.jpg' , candidate_labels=['cat', 'remote', 'couch'] , top_k=__snake_case , ) self.assertEqual( nested_simplify(__snake_case , decimals=4) , [ {'score': 0.2_8_6_8, 'label': 'cat', 'box': {'xmin': 3_24, 'ymin': 20, 'xmax': 6_40, 'ymax': 3_73}}, {'score': 0.2_7_7, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 72, 'xmax': 1_77, 'ymax': 1_15}}, ] , )
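# Hedged standalone usage of the pipeline exercised in the tests above; this
# downloads the default zero-shot detection checkpoint and the COCO sample
# image already used in the slow tests, so it is guarded as user code.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.2,
    )
    for pred in predictions:
        print(pred["label"], round(pred["score"], 4), pred["box"])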
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    """Configuration class for a ViT MAE model; defaults match facebook/vit-mae-base."""

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
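# Hedged usage sketch (user code, not part of this configuration module):
# instantiate the config above with a custom mask ratio; every unspecified
# argument keeps the ViT-MAE base default.
if __name__ == "__main__":
    config = ViTMAEConfig(mask_ratio=0.5, decoder_num_hidden_layers=4)
    print(config.hidden_size, config.mask_ratio)  # 768 0.5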
import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self , __snake_case , __snake_case=7 , __snake_case=3 , __snake_case=18 , __snake_case=30 , __snake_case=4_00 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=[0.5, 0.5, 0.5] , __snake_case=[0.5, 0.5, 0.5] , ): _UpperCamelCase : Tuple = size if size is not None else {'height': 18, 'width': 18} _UpperCamelCase : int = parent _UpperCamelCase : Dict = batch_size _UpperCamelCase : str = num_channels _UpperCamelCase : Tuple = image_size _UpperCamelCase : Optional[int] = min_resolution _UpperCamelCase : int = max_resolution _UpperCamelCase : Optional[Any] = do_resize _UpperCamelCase : Union[str, Any] = size _UpperCamelCase : Any = do_normalize _UpperCamelCase : Any = image_mean _UpperCamelCase : Dict = image_std def A__ ( self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = DPTImageProcessor if is_vision_available() else None def A__ ( self): _UpperCamelCase : Optional[Any] = DPTImageProcessingTester(self) @property def A__ ( self): return self.image_processor_tester.prepare_image_processor_dict() def A__ ( self): _UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(__snake_case , 'image_mean')) self.assertTrue(hasattr(__snake_case , 'image_std')) self.assertTrue(hasattr(__snake_case , 'do_normalize')) self.assertTrue(hasattr(__snake_case , 'do_resize')) self.assertTrue(hasattr(__snake_case , 'size')) def A__ ( self): _UpperCamelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'height': 18, 'width': 18}) _UpperCamelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42) self.assertEqual(image_processor.size , {'height': 42, 'width': 42}) def A__ ( self): # Initialize image_processing _UpperCamelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PIL images _UpperCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case) for image in image_inputs: self.assertIsInstance(__snake_case , Image.Image) # Test not batched input _UpperCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCamelCase : str = image_processing(__snake_case , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def A__ ( self): # Initialize image_processing _UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict) # 
create random numpy tensors _UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case) for image in image_inputs: self.assertIsInstance(__snake_case , np.ndarray) # Test not batched input _UpperCamelCase : str = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCamelCase : Optional[Any] = image_processing(__snake_case , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) def A__ ( self): # Initialize image_processing _UpperCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors _UpperCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case) for image in image_inputs: self.assertIsInstance(__snake_case , torch.Tensor) # Test not batched input _UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , ) # Test batched _UpperCamelCase : Tuple = image_processing(__snake_case , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['height'], self.image_processor_tester.size['width'], ) , )
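# Hedged standalone sketch of the preprocessing exercised in the tests above:
# one random RGB image through DPTImageProcessor at the same 18x18 target
# size; guarded as user code since it is not part of the test class.
if __name__ == "__main__":
    import numpy as np
    from transformers import DPTImageProcessor

    image_processor = DPTImageProcessor(size={"height": 18, "width": 18})
    image = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
    pixel_values = image_processor(image, return_tensors="np").pixel_values
    print(pixel_values.shape)  # (1, 3, 18, 18)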
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to cover all travel days with 1-, 7- and 30-day passes."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
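# Worked example (added illustration): covering travel days [1, 4, 6, 7, 8, 20]
# with pass costs [2, 7, 15] (1-, 7- and 30-day) is cheapest at 11: a 1-day
# pass on day 1, a 7-day pass spanning days 4-8, and a 1-day pass on day 20.
if __name__ == "__main__":
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11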
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self): _UpperCamelCase : Optional[Any] = ['a', 'b', 'c'] # Defaults to last layer if both are None _UpperCamelCase , _UpperCamelCase : Any = get_aligned_output_features_output_indices(__snake_case , __snake_case , __snake_case) self.assertEqual(__snake_case , ['c']) self.assertEqual(__snake_case , [2]) # Out indices set to match out features _UpperCamelCase , _UpperCamelCase : List[Any] = get_aligned_output_features_output_indices(['a', 'c'] , __snake_case , __snake_case) self.assertEqual(__snake_case , ['a', 'c']) self.assertEqual(__snake_case , [0, 2]) # Out features set to match out indices _UpperCamelCase , _UpperCamelCase : int = get_aligned_output_features_output_indices(__snake_case , [0, 2] , __snake_case) self.assertEqual(__snake_case , ['a', 'c']) self.assertEqual(__snake_case , [0, 2]) # Out features selected from negative indices _UpperCamelCase , _UpperCamelCase : Union[str, Any] = get_aligned_output_features_output_indices(__snake_case , [-3, -1] , __snake_case) self.assertEqual(__snake_case , ['a', 'c']) self.assertEqual(__snake_case , [-3, -1]) def A__ ( self): # Stage names must be set with self.assertRaises(__snake_case): verify_out_features_out_indices(['a', 'b'] , (0, 1) , __snake_case) # Out features must be a list with self.assertRaises(__snake_case): verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b']) # Out features must be a subset of stage names with self.assertRaises(__snake_case): verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a']) # Out indices must be a list or tuple with self.assertRaises(__snake_case): verify_out_features_out_indices(__snake_case , 0 , ['a', 'b']) # Out indices must be a subset of stage names with self.assertRaises(__snake_case): verify_out_features_out_indices(__snake_case , (0, 1) , ['a']) # Out features and out indices must be the same length with self.assertRaises(__snake_case): verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c']) # Out features should match out indices with self.assertRaises(__snake_case): verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c']) # Out features and out indices should be in order with self.assertRaises(__snake_case): verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b']) # Check passes with valid inputs verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd']) def A__ ( self): _UpperCamelCase : str = BackboneMixin() _UpperCamelCase : Optional[int] = ['a', 'b', 'c'] _UpperCamelCase : List[Any] = ['a', 'c'] _UpperCamelCase : Union[str, Any] = [0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['a', 'c']) self.assertEqual(backbone.out_indices , [0, 2]) # Check out features and indices are updated correctly _UpperCamelCase : int = ['a', 'b'] self.assertEqual(backbone.out_features , ['a', 'b']) self.assertEqual(backbone.out_indices , [0, 1]) _UpperCamelCase : List[str] = [-3, -1] self.assertEqual(backbone.out_features , ['a', 'c']) self.assertEqual(backbone.out_indices , [-3, -1])
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ): _UpperCamelCase : List[Any] = parent _UpperCamelCase : Optional[Any] = batch_size _UpperCamelCase : int = seq_length _UpperCamelCase : str = is_training _UpperCamelCase : Tuple = use_input_mask _UpperCamelCase : Union[str, Any] = use_token_type_ids _UpperCamelCase : Union[str, Any] = use_labels _UpperCamelCase : Optional[Any] = vocab_size _UpperCamelCase : List[Any] = hidden_size _UpperCamelCase : Optional[Any] = embedding_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : str = num_attention_heads _UpperCamelCase : int = intermediate_size _UpperCamelCase : int = hidden_act _UpperCamelCase : Tuple = hidden_dropout_prob _UpperCamelCase : int = attention_probs_dropout_prob _UpperCamelCase : Tuple = max_position_embeddings _UpperCamelCase : List[str] = type_vocab_size _UpperCamelCase : Dict = type_sequence_label_size _UpperCamelCase : List[str] = initializer_range _UpperCamelCase : Optional[Any] = num_labels _UpperCamelCase : Tuple = num_choices _UpperCamelCase : List[str] = scope def A__ ( self): _UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase : Any = None if self.use_input_mask: _UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase : Optional[Any] = None if self.use_token_type_ids: _UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase : int = None _UpperCamelCase : List[str] = None _UpperCamelCase : Dict = None if self.use_labels: _UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , ) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case) _UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case) _UpperCamelCase : Optional[Any] = model(__snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[str] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def A__ ( self , 
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[int] = self.num_labels _UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Any = self.num_labels _UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : List[str] = self.num_choices _UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : Union[str, Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def A__ ( self): _UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) : Optional[int] = config_and_inputs _UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) a__ = True # test_resize_embeddings = False a__ = False def A__ ( self , __snake_case , __snake_case , __snake_case=False): _UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case) if return_labels: if model_class in get_values(__snake_case): _UpperCamelCase : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case) _UpperCamelCase : str = 
torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__snake_case) return inputs_dict def A__ ( self): _UpperCamelCase : Any = MegatronBertModelTester(self) _UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37) def A__ ( self): self.config_tester.run_common_tests() def A__ ( self): _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__snake_case) def A__ ( self): _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case) def A__ ( self): _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case) def A__ ( self): _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case) def A__ ( self): _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case) def A__ ( self): _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case) def A__ ( self): _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case) def A__ ( self): _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case) def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]: '''simple docstring''' return torch.tensor( UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , ) lowerCAmelCase__ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip('Model is not available.') def A__ ( self): _UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m' if "MYDIR" in os.environ: _UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case) _UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case) model.to(__snake_case) model.half() _UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]]) with torch.no_grad(): _UpperCamelCase : str = model(__snake_case)[0] _UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24)) self.assertEqual(output.shape , __snake_case) _UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8] for ii in range(3): for jj in range(3): _UpperCamelCase : Optional[Any] = output[0, ii, jj] _UpperCamelCase : Dict = expected[3 * ii + jj] _UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case) self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
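# Hedged usage sketch (user code, not part of this __init__ module): the GLUE
# registry maps task names to DataProcessor subclasses, so task metadata can
# be inspected without downloading any dataset.
if __name__ == "__main__":
    from transformers.data.processors import glue_processors, glue_tasks_num_labels

    mrpc = glue_processors["mrpc"]()
    print(mrpc.get_labels())              # ['0', '1']
    print(glue_tasks_num_labels["mrpc"])  # 2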
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = """▁""" lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""} lowerCAmelCase__ = { """vocab_file""": { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model""" ), } } lowerCAmelCase__ = { """xlm-roberta-base""": 5_1_2, """xlm-roberta-large""": 5_1_2, """xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2, """xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2, """xlm-roberta-large-finetuned-conll03-english""": 5_1_2, """xlm-roberta-large-finetuned-conll03-german""": 5_1_2, } class lowercase ( _lowercase ): """simple docstring""" a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ["input_ids", "attention_mask"] def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ): # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token _UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , ) _UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(__snake_case)) _UpperCamelCase : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCamelCase : List[Any] = 1 _UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset _UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self): _UpperCamelCase : List[Any] = self.__dict__.copy() _UpperCamelCase : Optional[Any] = None _UpperCamelCase : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self , __snake_case): _UpperCamelCase : int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): _UpperCamelCase : Tuple = {} _UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def A__ ( self , __snake_case , __snake_case = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase : Tuple = [self.cls_token_id] _UpperCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self , __snake_case , __snake_case = None , __snake_case = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case) if token_ids_a is None: return [1] + ([0] * len(__snake_case)) + [1] return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1] def A__ ( self , __snake_case , __snake_case = None): _UpperCamelCase : Optional[Any] = [self.sep_token_id] _UpperCamelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def A__ ( self): return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token def A__ ( self): _UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def A__ ( self , __snake_case): return self.sp_model.encode(__snake_case , out_type=__snake_case) def A__ ( self , __snake_case): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCamelCase : str = self.sp_model.PieceToId(__snake_case) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A__ ( self , __snake_case): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def A__ ( self , __snake_case): _UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(__snake_case , ' ').strip() return out_string def A__ ( self , __snake_case , __snake_case = None): if not os.path.isdir(__snake_case): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return _UpperCamelCase : str = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __snake_case) elif not os.path.isfile(self.vocab_file): with open(__snake_case , 'wb') as fi: _UpperCamelCase : Any = self.sp_model.serialized_model_proto() fi.write(__snake_case) 
return (out_vocab_file,)
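# Hedged usage sketch (user code): the class above corresponds to transformers'
# slow XLMRobertaTokenizer; running this downloads the sentencepiece vocab
# listed in PRETRAINED_VOCAB_FILES_MAP, so it is guarded.
if __name__ == "__main__":
    from transformers import XLMRobertaTokenizer

    tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    ids = tokenizer("Hello world!")["input_ids"]
    assert ids[0] == 0 and ids[-1] == 2  # <s> ... </s>, per the fairseq alignment above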
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a word's signature: its letters in sorted order."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the word list sharing my_word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
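# Added illustration: two words are anagrams exactly when their signatures
# (sorted letters) coincide.
assert signature("listen") == signature("silent") == "eilnst"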
648
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into a single processor."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
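The __call__ above flattens the per-modality processor outputs into one dict; a minimal sketch of that merge, with plain dicts standing in for the processor outputs (keys are illustrative):

audio_outputs = {"audio_values": "<audio tensor>", "audio_mask": "<mask tensor>"}
image_outputs = {"pixel_values": "<image tensor>"}

merged = {}
for modality_outputs in (audio_outputs, image_outputs):
    merged.update(modality_outputs)  # later modalities would overwrite clashing keys

assert set(merged) == {"audio_values", "audio_mask", "pixel_values"}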
648
1
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the whole canvas by one generation of Conway's Game of Life."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
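A small worked check of the survival rule, assuming the definitions above: a live centre cell with exactly two live neighbours survives, while a dead cell with two live neighbours stays dead.

import numpy as np

neighbourhood = np.array(
    [
        [False, True, False],
        [False, True, True],  # centre cell is alive with two live neighbours
        [False, False, False],
    ]
)
assert __judge_point(True, neighbourhood)  # 2 live neighbours -> lives on

neighbourhood[1][1] = False  # kill the centre; still 2 live neighbours
assert not __judge_point(False, neighbourhood)  # rebirth needs exactly 3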
648
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}


class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Both sizes fall back to ``hidden_size``-derived defaults when unset.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
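A short sanity check of the defaulting rules above via the public RwkvConfig (this assumes a transformers release that ships RWKV, i.e. v4.29 or later):

from transformers import RwkvConfig

config = RwkvConfig(hidden_size=512)
assert config.attention_hidden_size == 512  # falls back to hidden_size
assert config.intermediate_size == 2048  # falls back to 4 * hidden_size
assert config.context_length == 1024  # default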
648
1
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(_lowercase ) class lowercase ( _lowercase ): """simple docstring""" def __init__( self , **__snake_case): super().__init__(**__snake_case) if self.framework == "tf": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''') requires_backends(self , 'vision') self.check_model_type(__snake_case) def __call__( self , __snake_case , __snake_case = None , **__snake_case , ): if "text_queries" in kwargs: _UpperCamelCase : List[Any] = kwargs.pop('text_queries') if isinstance(__snake_case , (str, Image.Image)): _UpperCamelCase : Union[str, Any] = {'image': image, 'candidate_labels': candidate_labels} else: _UpperCamelCase : int = image _UpperCamelCase : Tuple = super().__call__(__snake_case , **__snake_case) return results def A__ ( self , **__snake_case): _UpperCamelCase : int = {} if "threshold" in kwargs: _UpperCamelCase : Tuple = kwargs['threshold'] if "top_k" in kwargs: _UpperCamelCase : Optional[Any] = kwargs['top_k'] return {}, {}, postprocess_params def A__ ( self , __snake_case): _UpperCamelCase : Dict = load_image(inputs['image']) _UpperCamelCase : Dict = inputs['candidate_labels'] if isinstance(__snake_case , __snake_case): _UpperCamelCase : Tuple = candidate_labels.split(',') _UpperCamelCase : str = torch.tensor([[image.height, image.width]] , dtype=torch.intaa) for i, candidate_label in enumerate(__snake_case): _UpperCamelCase : Optional[Any] = self.tokenizer(__snake_case , return_tensors=self.framework) _UpperCamelCase : int = self.image_processor(__snake_case , return_tensors=self.framework) yield { "is_last": i == len(__snake_case) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def A__ ( self , __snake_case): _UpperCamelCase : Any = model_inputs.pop('target_size') _UpperCamelCase : str = model_inputs.pop('candidate_label') _UpperCamelCase : List[str] = model_inputs.pop('is_last') _UpperCamelCase : Any = self.model(**__snake_case) _UpperCamelCase : List[str] = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs} return model_outputs def A__ ( self , __snake_case , __snake_case=0.1 , __snake_case=None): _UpperCamelCase : Union[str, Any] = [] for model_output in model_outputs: _UpperCamelCase : Union[str, Any] = model_output['candidate_label'] _UpperCamelCase : Any = BaseModelOutput(__snake_case) _UpperCamelCase : Union[str, Any] = self.image_processor.post_process_object_detection( outputs=__snake_case , threshold=__snake_case , target_sizes=model_output['target_size'])[0] for index in outputs["scores"].nonzero(): _UpperCamelCase : Optional[Any] = outputs['scores'][index].item() _UpperCamelCase : int = self._get_bounding_box(outputs['boxes'][index][0]) _UpperCamelCase : Optional[int] = {'score': score, 'label': label, 'box': box} results.append(__snake_case) _UpperCamelCase : Any = sorted(__snake_case , key=lambda __snake_case: x["score"] , reverse=__snake_case) if top_k: _UpperCamelCase : str = results[:top_k] return results def A__ ( self , 
__snake_case): if self.framework != "pt": raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.') _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[Any] = box.int().tolist() _UpperCamelCase : str = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
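Typical use of this pipeline goes through the high-level API; the checkpoint and image below are illustrative choices, and scores will vary by model:

from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
predictions = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
# Each prediction is a dict such as:
# {"score": 0.28, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
for prediction in predictions:
    print(prediction["label"], round(prediction["score"], 3), prediction["box"])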
648
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""", """bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""", """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""", """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""", """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""", """bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""", """cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""", """cl-tohoku/bert-base-japanese-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json""" ), """wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""", # See all BERT models at https://huggingface.co/models?filter=bert } class lowercase ( _lowercase ): """simple docstring""" a__ = "bert" def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , 
**__snake_case , ): super().__init__(pad_token_id=__snake_case , **__snake_case) _UpperCamelCase : int = vocab_size _UpperCamelCase : Optional[Any] = hidden_size _UpperCamelCase : Optional[Any] = num_hidden_layers _UpperCamelCase : List[str] = num_attention_heads _UpperCamelCase : int = hidden_act _UpperCamelCase : Optional[Any] = intermediate_size _UpperCamelCase : Union[str, Any] = hidden_dropout_prob _UpperCamelCase : Tuple = attention_probs_dropout_prob _UpperCamelCase : Optional[int] = max_position_embeddings _UpperCamelCase : str = type_vocab_size _UpperCamelCase : Optional[Any] = initializer_range _UpperCamelCase : List[str] = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Any = use_cache _UpperCamelCase : Any = classifier_dropout class lowercase ( _lowercase ): """simple docstring""" @property def A__ ( self): if self.task == "multiple-choice": _UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ])
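A minimal example of instantiating a model from the config class above (standard PretrainedConfig usage; the tiny sizes are illustrative):

from transformers import BertConfig, BertModel

config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256)
model = BertModel(config)  # randomly initialised weights with the reduced shape
assert model.config.hidden_size == 128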
648
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = { """configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""], """tokenization_lxmert""": ["""LxmertTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ["""LxmertTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """LxmertEncoder""", """LxmertForPreTraining""", """LxmertForQuestionAnswering""", """LxmertModel""", """LxmertPreTrainedModel""", """LxmertVisualFeatureEncoder""", """LxmertXLayer""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLxmertForPreTraining""", """TFLxmertMainLayer""", """TFLxmertModel""", """TFLxmertPreTrainedModel""", """TFLxmertVisualFeatureEncoder""", ] if TYPE_CHECKING: from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig from .tokenization_lxmert import LxmertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_lxmert_fast import LxmertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
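The init above registers everything in _import_structure so heavy backends load only on first attribute access; a toy sketch of the same deferred-import idea using PEP 562 module-level __getattr__ (names are illustrative, not the transformers implementation):

# lazy_demo.py: attributes listed here are imported only when first accessed.
import importlib

_import_structure = {"math": ["sqrt"]}  # providing module -> exported names


def __getattr__(name):  # PEP 562: called only for attributes not found normally
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")


# Elsewhere: `from lazy_demo import sqrt` triggers the import of math only at this point.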
648
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # Find which output index of the NLI head corresponds to "entailment".
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
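The tool above scores each candidate by entailment of the hypothesis "This example is {label}"; the same trick is exposed through the standard zero-shot classification pipeline (the checkpoint matches the tool's default):

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new GPU doubles training throughput.",
    candidate_labels=["hardware", "cooking", "politics"],
)
print(result["labels"][0])  # most likely label, e.g. "hardware"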
648
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
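A worked check of the sequence length implied by the defaults above: 32 frames of 224x224 pixels split into 2x16x16 tubelets.

image_size, num_frames = 224, 32
t, h, w = 2, 16, 16  # tubelet_size

num_patches = (num_frames // t) * (image_size // h) * (image_size // w)
assert num_patches == 16 * 14 * 14 == 3136  # video tokens before any [CLS] token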
648
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
648
1
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Use Pollard's Rho algorithm to return a nontrivial factor of ``num``."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what the "optimized" variant by Richard Brent does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
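A quick demonstration, assuming the function above: with the default seed and step, the algorithm deterministically recovers a factor of the classic textbook semiprime 8051 = 83 * 97.

divisor = pollard_rho(8051)
assert divisor in (83, 97)
assert 8051 % divisor == 0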
648
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
648
1
import collections import os import re from pathlib import Path lowerCAmelCase__ = """src/transformers""" # Matches is_xxx_available() lowerCAmelCase__ = re.compile(R"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} lowerCAmelCase__ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase__ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available lowerCAmelCase__ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase__ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase__ = re.compile(R"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase__ = re.compile(R"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo lowerCAmelCase__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: lowerCAmelCase__ = re.compile(R"""^\s*try:""") # Catches a line with else: lowerCAmelCase__ = re.compile(R"""^\s*else:""") def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> str: '''simple docstring''' if _re_test_backend.search(UpperCAmelCase_ ) is None: return None _UpperCamelCase : List[Any] = [b[0] for b in _re_backend.findall(UpperCAmelCase_ )] backends.sort() return "_and_".join(UpperCAmelCase_ ) def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] ) -> int: '''simple docstring''' with open(UpperCAmelCase_ , 'r' , encoding='utf-8' , newline='\n' ) as f: _UpperCamelCase : Union[str, Any] = f.readlines() _UpperCamelCase : List[Any] = 0 while line_index < len(UpperCAmelCase_ ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(UpperCAmelCase_ ): return None # First grab the objects without a specific backend in _import_structure _UpperCamelCase : List[Any] = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: _UpperCamelCase : Tuple = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCAmelCase_ ): _UpperCamelCase : List[Any] = _re_one_line_import_struct.search(UpperCAmelCase_ ).groups()[0] _UpperCamelCase : Optional[Any] = re.findall(R'\[([^\]]+)\]' , UpperCAmelCase_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue _UpperCamelCase : Any = _re_import_struct_key_value.search(UpperCAmelCase_ ) if single_line_import_search is not None: _UpperCamelCase : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(UpperCAmelCase_ ) > 0] objects.extend(UpperCAmelCase_ ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 _UpperCamelCase : Optional[Any] = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. 
_UpperCamelCase : Dict = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _UpperCamelCase : Optional[int] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _UpperCamelCase : Optional[int] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): _UpperCamelCase : Optional[int] = lines[line_index] if _re_import_struct_add_one.search(UpperCAmelCase_ ) is not None: objects.append(_re_import_struct_add_one.search(UpperCAmelCase_ ).groups()[0] ) elif _re_import_struct_add_many.search(UpperCAmelCase_ ) is not None: _UpperCamelCase : Union[str, Any] = _re_import_struct_add_many.search(UpperCAmelCase_ ).groups()[0].split(', ' ) _UpperCamelCase : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCAmelCase_ ) > 0] objects.extend(UpperCAmelCase_ ) elif _re_between_brackets.search(UpperCAmelCase_ ) is not None: _UpperCamelCase : Optional[Any] = _re_between_brackets.search(UpperCAmelCase_ ).groups()[0].split(', ' ) _UpperCamelCase : Optional[Any] = [obj[1:-1] for obj in imports if len(UpperCAmelCase_ ) > 0] objects.extend(UpperCAmelCase_ ) elif _re_quote_object.search(UpperCAmelCase_ ) is not None: objects.append(_re_quote_object.search(UpperCAmelCase_ ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 1_2 + '"' ): objects.append(line[1_3:-3] ) line_index += 1 _UpperCamelCase : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend _UpperCamelCase : Union[str, Any] = [] while ( line_index < len(UpperCAmelCase_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): _UpperCamelCase : Optional[int] = lines[line_index] _UpperCamelCase : Tuple = _re_import.search(UpperCAmelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 _UpperCamelCase : int = {'none': objects} # Let's continue with backend-specific objects while line_index < len(UpperCAmelCase_ ): # If the line is an if is_backend_available, we grab all objects associated. 
_UpperCamelCase : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: _UpperCamelCase : Optional[Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 _UpperCamelCase : Tuple = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): _UpperCamelCase : Tuple = lines[line_index] _UpperCamelCase : List[Any] = _re_import.search(UpperCAmelCase_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 1_2 ): objects.append(line[1_2:-2] ) line_index += 1 _UpperCamelCase : List[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' def find_duplicates(UpperCAmelCase_ : Tuple ): return [k for k, v in collections.Counter(UpperCAmelCase_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] _UpperCamelCase : str = [] for key in import_dict_objects.keys(): _UpperCamelCase : List[Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) _UpperCamelCase : Optional[int] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): _UpperCamelCase : Dict = 'base imports' if key == 'none' else F'''{key} backend''' errors.append(F'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def lowerCamelCase_ ( ) -> int: '''simple docstring''' _UpperCamelCase : List[Any] = [] for root, _, files in os.walk(UpperCAmelCase_ ): if "__init__.py" in files: _UpperCamelCase : List[Any] = os.path.join(UpperCAmelCase_ , '__init__.py' ) _UpperCamelCase : List[str] = parse_init(UpperCAmelCase_ ) if objects is not None: _UpperCamelCase : Optional[Any] = analyze_results(*UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0: _UpperCamelCase : List[Any] = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('\n'.join(UpperCAmelCase_ ) ) if len(UpperCAmelCase_ ) > 0: raise ValueError('\n\n'.join(UpperCAmelCase_ ) ) def lowerCamelCase_ ( ) -> Any: '''simple docstring''' _UpperCamelCase : int = [] for path, directories, files in os.walk(UpperCAmelCase_ ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(UpperCAmelCase_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(UpperCAmelCase_ ) / folder).glob('*.py' ) ) ) == 0: continue _UpperCamelCase : str = str((Path(UpperCAmelCase_ ) / folder).relative_to(UpperCAmelCase_ ) ) _UpperCamelCase : List[str] = short_path.replace(os.path.sep , '.' 
) submodules.append(UpperCAmelCase_ ) for fname in files: if fname == "__init__.py": continue _UpperCamelCase : Any = str((Path(UpperCAmelCase_ ) / fname).relative_to(UpperCAmelCase_ ) ) _UpperCamelCase : Optional[int] = short_path.replace('.py' , '' ).replace(os.path.sep , '.' ) if len(submodule.split('.' ) ) == 1: submodules.append(UpperCAmelCase_ ) return submodules lowerCAmelCase__ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def lowerCamelCase_ ( ) -> Optional[Any]: '''simple docstring''' from transformers.utils import direct_transformers_import _UpperCamelCase : Dict = direct_transformers_import(UpperCAmelCase_ ) _UpperCamelCase : List[str] = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentiall re-) add them. with open(os.path.join(UpperCAmelCase_ , '__init__.py' ) , 'r' ) as f: _UpperCamelCase : str = f.read() import_structure_keys.update(set(re.findall(R'import_structure\[\"([^\"]*)\"\]' , UpperCAmelCase_ ) ) ) _UpperCamelCase : Any = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(UpperCAmelCase_ ) > 0: _UpperCamelCase : Any = '\n'.join(F'''- {module}''' for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registed in the main init of Transformers:\n' F'''{list_of_modules}\n''' 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
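A small demonstration of the backend-detection regex used above; note that the unescaped () in the pattern adds an empty second capture group, which is why the script reads b[0] from each findall tuple:

import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")

line = "    if not is_torch_available() and not is_vision_available():"
backends = sorted(b[0] for b in _re_backend.findall(line))
assert backends == ["torch", "vision"]
print("_and_".join(backends))  # -> "torch_and_vision"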
648
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp lowerCAmelCase__ = 5 lowerCAmelCase__ = 1_0 @require_sentencepiece @require_tokenizers class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = SpeechaTextTokenizer a__ = False a__ = True def A__ ( self): super().setUp() _UpperCamelCase : Any = sp.SentencePieceProcessor() spm_model.Load(__snake_case) _UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))] _UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case)))) _UpperCamelCase : Tuple = Path(self.tmpdirname) save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file']) _UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def A__ ( self): _UpperCamelCase : str = '<pad>' _UpperCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case) def A__ ( self): _UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(vocab_keys[-1] , 'j') self.assertEqual(len(__snake_case) , 10_01) def A__ ( self): self.assertEqual(self.get_tokenizer().vocab_size , 10_01) def A__ ( self): _UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) _UpperCamelCase : List[str] = tokenizer.tokenize('This is a test') self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , ) _UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( __snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case) self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8]) _UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case) self.assertListEqual( __snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def A__ ( self): # fmt: off _UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 
31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class lowercase ( unittest.TestCase ): """simple docstring""" a__ = "valhalla/s2t_mustc_multilinguial_medium" a__ = "C'est trop cool" a__ = "Esto es genial" @classmethod def A__ ( cls): _UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def A__ ( self): self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11) def A__ ( self): self.assertEqual(self.tokenizer.vocab_size , 1_00_00) def A__ ( self): self.assertIn(__snake_case , self.tokenizer.all_special_ids) _UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2] _UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case) _UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case) self.assertEqual(__snake_case , __snake_case) self.assertNotIn(self.tokenizer.eos_token , __snake_case) def A__ ( self): _UpperCamelCase : Any = 'fr' _UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , __snake_case) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def A__ ( self): _UpperCamelCase : Union[str, Any] = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) _UpperCamelCase : List[str] = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
648
1
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
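A short trace of the bucket-index arithmetic, assuming the reconstruction above:

values = [0, 1, -10, 15, 2, -2]
min_value = min(values)  # -10
# Each value v lands in bucket int(v - min_value): -10 -> 0, 15 -> 25.
assert [int(v - min_value) for v in values] == [10, 11, 0, 25, 12, 8]
assert bucket_sort(values) == [-10, -2, 0, 1, 2, 15]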
648
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT configuration extended with pruning-related parameters."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
648
1
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = DebertaVaTokenizer a__ = DebertaVaTokenizerFast a__ = True a__ = True def A__ ( self): super().setUp() # We have a SentencePiece fixture for testing _UpperCamelCase : List[str] = DebertaVaTokenizer(__snake_case , unk_token='<unk>') tokenizer.save_pretrained(self.tmpdirname) def A__ ( self , __snake_case): _UpperCamelCase : List[Any] = 'this is a test' _UpperCamelCase : int = 'this is a test' return input_text, output_text def A__ ( self): _UpperCamelCase : List[str] = '<pad>' _UpperCamelCase : Union[str, Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case) def A__ ( self): _UpperCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<pad>') self.assertEqual(vocab_keys[1] , '<unk>') self.assertEqual(vocab_keys[-1] , '[PAD]') self.assertEqual(len(__snake_case) , 3_00_01) def A__ ( self): self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00) def A__ ( self): # fmt: off _UpperCamelCase : Union[str, Any] = ' \tHeLLo!how \n Are yoU? ' _UpperCamelCase : Tuple = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on _UpperCamelCase : str = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case) _UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : int = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case) _UpperCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def A__ ( self): pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def A__ ( self): pass def A__ ( self): # fmt: off _UpperCamelCase : List[str] = 'I was born in 92000, and this is falsé.' _UpperCamelCase : str = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on _UpperCamelCase : int = DebertaVaTokenizer(__snake_case , split_by_punct=__snake_case) _UpperCamelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Optional[int] = DebertaVaTokenizerFast(__snake_case , split_by_punct=__snake_case) _UpperCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) def A__ ( self): # fmt: off _UpperCamelCase : Optional[Any] = 'I was born in 92000, and this is falsé.' 
_UpperCamelCase : List[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on _UpperCamelCase : Optional[Any] = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case) _UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Optional[Any] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case) _UpperCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) def A__ ( self): # fmt: off _UpperCamelCase : Tuple = 'I was born in 92000, and this is falsé.' _UpperCamelCase : List[Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on _UpperCamelCase : Any = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case) _UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Optional[Any] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case) _UpperCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) def A__ ( self): # fmt: off _UpperCamelCase : Tuple = 'I was born in 92000, and this is falsé.' _UpperCamelCase : int = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on _UpperCamelCase : List[str] = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case) _UpperCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : List[str] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case) _UpperCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) def A__ ( self): # fmt: off _UpperCamelCase : Optional[Any] = ' \tHeLLo!how \n Are yoU? ' _UpperCamelCase : Dict = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on _UpperCamelCase : List[Any] = DebertaVaTokenizer(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case) _UpperCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Union[str, Any] = DebertaVaTokenizerFast(__snake_case , do_lower_case=__snake_case , split_by_punct=__snake_case) _UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) def A__ ( self): _UpperCamelCase : Any = self.get_tokenizer() _UpperCamelCase : Union[str, Any] = self.get_rust_tokenizer() _UpperCamelCase : int = 'I was born in 92000, and this is falsé.' 
_UpperCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) _UpperCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case)) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : List[str] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case) _UpperCamelCase : Optional[int] = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : int = self.get_rust_tokenizer() _UpperCamelCase : List[str] = tokenizer.encode(__snake_case) _UpperCamelCase : List[str] = rust_tokenizer.encode(__snake_case) self.assertListEqual(__snake_case , __snake_case) def A__ ( self): _UpperCamelCase : Dict = 'This is a test' _UpperCamelCase : Tuple = [13, 1, 43_98, 25, 21, 12_89] _UpperCamelCase : Any = ['▁', 'T', 'his', '▁is', '▁a', '▁test'] _UpperCamelCase : Any = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] _UpperCamelCase : Optional[Any] = DebertaVaTokenizer(__snake_case , keep_accents=__snake_case) _UpperCamelCase : Union[str, Any] = DebertaVaTokenizerFast(__snake_case , keep_accents=__snake_case) _UpperCamelCase : Union[str, Any] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : List[Any] = tokenizer.tokenize(__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : int = tokenizer.convert_ids_to_tokens(__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Dict = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Dict = rust_tokenizer.tokenize(__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(__snake_case) self.assertListEqual(__snake_case , __snake_case) # fmt: off _UpperCamelCase : Optional[Any] = 'I was born in 92000, and this is falsé.' 
_UpperCamelCase : List[str] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] _UpperCamelCase : List[str] = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] _UpperCamelCase : Any = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on _UpperCamelCase : Any = tokenizer.encode(__snake_case , add_special_tokens=__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : str = tokenizer.tokenize(__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Any = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : int = rust_tokenizer.tokenize(__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : int = rust_tokenizer.convert_ids_to_tokens(__snake_case) self.assertListEqual(__snake_case , __snake_case) def A__ ( self): _UpperCamelCase : Union[str, Any] = DebertaVaTokenizer(__snake_case) _UpperCamelCase : Tuple = tokenizer.encode('sequence builders') _UpperCamelCase : List[Any] = tokenizer.encode('multi-sequence build') _UpperCamelCase : str = tokenizer.build_inputs_with_special_tokens(__snake_case) _UpperCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __snake_case) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __snake_case , ) @slow def A__ ( self): # fmt: off _UpperCamelCase : Optional[Any] = {'input_ids': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow lowerCAmelCase__ = False class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self , __snake_case=32): set_seed(0) _UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3) _UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1) return model, optimizer @slow def A__ ( self): _UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable _UpperCamelCase : List[Any] = DDPMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) _UpperCamelCase : List[Any] = DDIMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0) _UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)] _UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)] _UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)] # train with a DDPM scheduler _UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample _UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM _UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample _UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5)) self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
def binary_recursive(decimal: int) -> str:
    """Return the binary representation of a non-negative integer, built recursively."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Validate the input, then return the sign-aware '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
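# Quick sanity checks for the converter above. The function names follow the
# cleaned-up sketch; the expected values are ordinary binary arithmetic.
assert binary_recursive(0) == "0"
assert binary_recursive(40) == "101000"        # 32 + 8 = 40
assert main(" -40 ") == "-0b101000"            # whitespace stripped, sign preserved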
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowerCAmelCase__ = { """sample_size""": 3_2, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1_0_0_0, """block_out_channels""": [3_2, 6_4], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """sample_size""": 6_4, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1_0_0_0, """block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], """attention_head_dim""": 6_4, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """sample_size""": 2_5_6, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], """attention_head_dim""": 6_4, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """num_train_timesteps""": 4_0, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } lowerCAmelCase__ = { """num_train_timesteps""": 2_0_1, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } lowerCAmelCase__ = { """num_train_timesteps""": 1_5_1, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]: '''simple docstring''' if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str: '''simple docstring''' _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] _UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] _UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] _UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] _UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] _UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] _UpperCamelCase : Optional[Any] = 
checkpoint[F'''{old_prefix}.out_layers.3.weight'''] _UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: _UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int: '''simple docstring''' _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias'''] _UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Optional[Any] = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple: '''simple docstring''' _UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' ) _UpperCamelCase : Union[str, Any] = {} _UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight'] _UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias'] _UpperCamelCase : Dict = checkpoint['time_embed.2.weight'] _UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: _UpperCamelCase : List[str] = checkpoint['label_emb.weight'] _UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight'] _UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] _UpperCamelCase : Optional[int] = unet_config['down_block_types'] _UpperCamelCase : Optional[Any] = unet_config['layers_per_block'] _UpperCamelCase : Dict = unet_config['attention_head_dim'] _UpperCamelCase : List[str] = unet_config['block_out_channels'] _UpperCamelCase : str = 1 _UpperCamelCase : Optional[int] = channels_list[0] for i, layer_type in enumerate(UpperCAmelCase_ ): _UpperCamelCase : List[str] = channels_list[i] _UpperCamelCase : str = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(UpperCAmelCase_ ): _UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}''' _UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False _UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(UpperCAmelCase_ ): _UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}''' _UpperCamelCase : str = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False _UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ 
, UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) _UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}''' _UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1''' _UpperCamelCase : Dict = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0''' _UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 _UpperCamelCase : Tuple = current_channels # hardcoded the mid-block for now _UpperCamelCase : Any = 'mid_block.resnets.0' _UpperCamelCase : Optional[Any] = 'middle_block.0' _UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Optional[Any] = 'mid_block.attentions.0' _UpperCamelCase : Tuple = 'middle_block.1' _UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Tuple = 'mid_block.resnets.1' _UpperCamelCase : str = 'middle_block.2' _UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : List[Any] = 0 _UpperCamelCase : Optional[int] = unet_config['up_block_types'] for i, layer_type in enumerate(UpperCAmelCase_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}''' _UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0''' _UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0''' _UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1''' _UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}''' _UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0''' _UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) _UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}''' _UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1''' _UpperCamelCase : Optional[int] = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0''' _UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2''' _UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : List[Any] = checkpoint['out.0.weight'] _UpperCamelCase : str = checkpoint['out.0.bias'] _UpperCamelCase : int = checkpoint['out.2.weight'] _UpperCamelCase : List[Any] = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, 
help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = strabool(args.class_cond) lowerCAmelCase__ = os.path.basename(args.unet_path) print(f'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: lowerCAmelCase__ = TEST_UNET_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: lowerCAmelCase__ = None lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config) lowerCAmelCase__ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: lowerCAmelCase__ = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config) lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
def reverse_words(input_str: str) -> str:
    """Reverse the word order of a sentence, e.g. 'hello world' -> 'world hello'."""
    return " ".join(input_str.split()[::-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
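# Example for the helper above: only the word order flips; the characters
# inside each word stay intact.
assert reverse_words("I love Python") == "Python love I"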
def heaps(arr: list) -> list:
    """Return all permutations of arr, generated with Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n  # per-index swap counters that drive the iteration
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
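# Heap's algorithm emits exactly n! tuples, each one swap away from its
# predecessor. A minimal check using the `heaps` name from the sketch above:
from math import factorial

perms = heaps([1, 2, 3])
assert len(perms) == factorial(3)       # 6 permutations in total
assert len(set(perms)) == factorial(3)  # ...and all of them distinct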
def solution(limit: int = 1_000_000) -> int:
    """Count reduced proper fractions n/d with d <= limit (Project Euler 72).

    Uses a sieve over Euler's totient: phi[i] starts at i - 1 and is corrected
    downward once per prime factor, so the answer is sum(phi[2..limit]).
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, so adjust all of its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])


if __name__ == "__main__":
    print(solution())
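# The Project Euler 72 statement gives the count for d <= 8 as 21, which makes
# a cheap regression check for the `solution` sieve above.
assert solution(8) == 21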
import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    """Map a ParlAI state-dict key onto the Hugging Face Blenderbot naming scheme."""
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI checkpoint, remap its keys, and save an HF Blenderbot model."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every ordered combination of words from word_bank that spells target."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]


if __name__ == "__main__":
    print(all_construct("jwajalapa", ["jwa", "j", "w", "a", "la", "lapa"]))
    print(all_construct("rajamati", ["s", "raj", "amat", "raja", "ma", "i", "t"]))
    print(
        all_construct(
            "hexagonosaurus",
            ["h", "ex", "hex", "ag", "ago", "ru", "auru", "rus", "go", "no", "o", "s"],
        )
    )
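# Because `all_construct` returns every decomposition, its output can grow
# exponentially with the target length. A small worked example with the names
# from the sketch above:
ways = all_construct("purple", ["purp", "p", "ur", "le", "purpl"])
assert ways == [["purp", "le"], ["p", "ur", "p", "le"]]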
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""] lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only""" if is_tf_available(): class lowercase ( tf.keras.Model ): """simple docstring""" def __init__( self , __snake_case): super().__init__() _UpperCamelCase : List[Any] = tokenizer _UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case) _UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case) def A__ ( self , __snake_case): _UpperCamelCase : Any = self.tokenizer(__snake_case) _UpperCamelCase : Dict = self.bert(**__snake_case) return out["pooler_output"] @require_tf @require_tensorflow_text class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self): super().setUp() _UpperCamelCase : Optional[Any] = [ BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false _UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers) == len(self.tf_tokenizers) _UpperCamelCase : Optional[Any] = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] _UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1])) def A__ ( self): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers): for test_inputs in (self.test_sentences, self.paired_sentences): _UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest') _UpperCamelCase : Tuple = tf_tokenizer(__snake_case) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key])) @slow def A__ ( self): for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences) _UpperCamelCase : Optional[Any] = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key])) @slow def A__ ( self): for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Tuple = tf.function(__snake_case) for test_inputs in (self.test_sentences, self.paired_sentences): _UpperCamelCase : Optional[int] = tf.constant(__snake_case) _UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case) _UpperCamelCase : Tuple = tf_tokenizer(__snake_case) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == 
compiled_outputs[key])) @slow def A__ ( self): for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case) _UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences) _UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _UpperCamelCase : int = Path(__snake_case) / 'saved.model' model.save(__snake_case) _UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case) _UpperCamelCase : int = loaded_model(__snake_case) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] ) -> List[Any]: '''simple docstring''' _UpperCamelCase : Union[str, Any] = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: _UpperCamelCase : Any = [1_4_4, 1_9_2, 2_4_0] _UpperCamelCase : Any = [1_6, 3_2, 6_4, 9_6, 1_2_8, 1_6_0, 6_4_0] elif "mobilevit_xs" in mobilevit_name: _UpperCamelCase : Union[str, Any] = [9_6, 1_2_0, 1_4_4] _UpperCamelCase : Dict = [1_6, 3_2, 4_8, 6_4, 8_0, 9_6, 3_8_4] elif "mobilevit_xxs" in mobilevit_name: _UpperCamelCase : Dict = [6_4, 8_0, 9_6] _UpperCamelCase : List[Any] = [1_6, 1_6, 2_4, 4_8, 6_4, 8_0, 3_2_0] _UpperCamelCase : List[str] = 0.0_5 _UpperCamelCase : int = 2.0 if mobilevit_name.startswith('deeplabv3_' ): _UpperCamelCase : Tuple = 5_1_2 _UpperCamelCase : Optional[int] = 1_6 _UpperCamelCase : List[str] = 2_1 _UpperCamelCase : Dict = 'pascal-voc-id2label.json' else: _UpperCamelCase : Optional[Any] = 1_0_0_0 _UpperCamelCase : Union[str, Any] = 'imagenet-1k-id2label.json' _UpperCamelCase : int = 'huggingface/label-files' _UpperCamelCase : List[str] = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type='dataset' ) , 'r' ) ) _UpperCamelCase : Dict = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()} _UpperCamelCase : int = idalabel _UpperCamelCase : List[Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=False ) -> Union[str, Any]: '''simple docstring''' for i in range(1 , 6 ): if F'''layer_{i}.''' in name: _UpperCamelCase : int = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: _UpperCamelCase : List[Any] = name.replace('conv_1.' , 'conv_stem.' ) if ".block." in name: _UpperCamelCase : Optional[int] = name.replace('.block.' , '.' ) if "exp_1x1" in name: _UpperCamelCase : str = name.replace('exp_1x1' , 'expand_1x1' ) if "red_1x1" in name: _UpperCamelCase : Dict = name.replace('red_1x1' , 'reduce_1x1' ) if ".local_rep.conv_3x3." in name: _UpperCamelCase : List[Any] = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' ) if ".local_rep.conv_1x1." in name: _UpperCamelCase : Optional[int] = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' ) if ".norm." in name: _UpperCamelCase : List[str] = name.replace('.norm.' , '.normalization.' ) if ".conv." in name: _UpperCamelCase : Tuple = name.replace('.conv.' , '.convolution.' ) if ".conv_proj." in name: _UpperCamelCase : List[Any] = name.replace('.conv_proj.' , '.conv_projection.' 
) for i in range(0 , 2 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: _UpperCamelCase : Optional[int] = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: _UpperCamelCase : Tuple = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' ) if "expand_1x1" in name: _UpperCamelCase : Dict = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' ) if "conv_3x3" in name: _UpperCamelCase : List[str] = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' ) if "reduce_1x1" in name: _UpperCamelCase : List[Any] = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' ) for i in range(2 , 5 ): if F'''.global_rep.{i}.weight''' in name: _UpperCamelCase : str = name.replace(F'''.global_rep.{i}.weight''' , '.layernorm.weight' ) if F'''.global_rep.{i}.bias''' in name: _UpperCamelCase : List[str] = name.replace(F'''.global_rep.{i}.bias''' , '.layernorm.bias' ) if ".global_rep." in name: _UpperCamelCase : Optional[int] = name.replace('.global_rep.' , '.transformer.' ) if ".pre_norm_mha.0." in name: _UpperCamelCase : Optional[Any] = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' ) if ".pre_norm_mha.1.out_proj." in name: _UpperCamelCase : str = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' ) if ".pre_norm_ffn.0." in name: _UpperCamelCase : List[Any] = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' ) if ".pre_norm_ffn.1." in name: _UpperCamelCase : int = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' ) if ".pre_norm_ffn.4." in name: _UpperCamelCase : Union[str, Any] = name.replace('.pre_norm_ffn.4.' , '.output.dense.' ) if ".transformer." in name: _UpperCamelCase : Tuple = name.replace('.transformer.' , '.transformer.layer.' ) if ".aspp_layer." in name: _UpperCamelCase : List[Any] = name.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in name: _UpperCamelCase : Optional[int] = name.replace('.aspp_pool.' , '.' ) if "seg_head." in name: _UpperCamelCase : Optional[Any] = name.replace('seg_head.' , 'segmentation_head.' ) if "segmentation_head.classifier.classifier." in name: _UpperCamelCase : Optional[int] = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' ) if "classifier.fc." in name: _UpperCamelCase : List[str] = name.replace('classifier.fc.' , 'classifier.' ) elif (not base_model) and ("segmentation_head." not in name): _UpperCamelCase : Dict = 'mobilevit.' + name return name def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=False ) -> Union[str, Any]: '''simple docstring''' if base_model: _UpperCamelCase : Union[str, Any] = '' else: _UpperCamelCase : Union[str, Any] = 'mobilevit.' for key in orig_state_dict.copy().keys(): _UpperCamelCase : Union[str, Any] = orig_state_dict.pop(UpperCAmelCase_ ) if key[:8] == "encoder.": _UpperCamelCase : Optional[int] = key[8:] if "qkv" in key: _UpperCamelCase : Optional[int] = key.split('.' 
) _UpperCamelCase : Optional[Any] = int(key_split[0][6:] ) - 1 _UpperCamelCase : int = int(key_split[3] ) _UpperCamelCase : Tuple = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' ) _UpperCamelCase : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size _UpperCamelCase : str = ( F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: _UpperCamelCase : Union[str, Any] = val[:dim, :] _UpperCamelCase : Optional[Any] = val[dim : dim * 2, :] _UpperCamelCase : Optional[int] = val[-dim:, :] else: _UpperCamelCase : Dict = val[:dim] _UpperCamelCase : str = val[dim : dim * 2] _UpperCamelCase : Optional[int] = val[-dim:] else: _UpperCamelCase : Union[str, Any] = val return orig_state_dict def lowerCamelCase_ ( ) -> Any: '''simple docstring''' _UpperCamelCase : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' _UpperCamelCase : List[Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw ) return im @torch.no_grad() def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False ) -> int: '''simple docstring''' _UpperCamelCase : Tuple = get_mobilevit_config(UpperCAmelCase_ ) # load original state_dict _UpperCamelCase : List[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' ) # load 🤗 model if mobilevit_name.startswith('deeplabv3_' ): _UpperCamelCase : Tuple = MobileViTForSemanticSegmentation(UpperCAmelCase_ ).eval() else: _UpperCamelCase : List[str] = MobileViTForImageClassification(UpperCAmelCase_ ).eval() _UpperCamelCase : int = convert_state_dict(UpperCAmelCase_ , UpperCAmelCase_ ) model.load_state_dict(UpperCAmelCase_ ) # Check outputs on an image, prepared by MobileViTImageProcessor _UpperCamelCase : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 3_2 ) _UpperCamelCase : List[str] = image_processor(images=prepare_img() , return_tensors='pt' ) _UpperCamelCase : List[str] = model(**UpperCAmelCase_ ) _UpperCamelCase : List[str] = outputs.logits if mobilevit_name.startswith('deeplabv3_' ): assert logits.shape == (1, 2_1, 3_2, 3_2) if mobilevit_name == "deeplabv3_mobilevit_s": _UpperCamelCase : List[str] = torch.tensor( [ [[6.2_0_6_5, 6.1_2_9_2, 6.2_0_7_0], [6.1_0_7_9, 6.1_2_5_4, 6.1_7_4_7], [6.0_0_4_2, 6.1_0_7_1, 6.1_0_3_4]], [[-6.9_2_5_3, -6.8_6_5_3, -7.0_3_9_8], [-7.3_2_1_8, -7.3_9_8_3, -7.3_6_7_0], [-7.1_9_6_1, -7.2_4_8_2, -7.1_5_6_9]], [[-4.4_7_2_3, -4.4_3_4_8, -4.3_7_6_9], [-5.3_6_2_9, -5.4_6_3_2, -5.4_5_9_8], [-5.1_5_8_7, -5.3_4_0_2, -5.5_0_5_9]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": _UpperCamelCase : Optional[Any] = torch.tensor( [ [[5.4_4_4_9, 5.5_7_3_3, 5.6_3_1_4], [5.1_8_1_5, 5.3_9_3_0, 5.5_9_6_3], [5.1_6_5_6, 5.4_3_3_3, 5.4_8_5_3]], [[-9.4_4_2_3, -9.7_7_6_6, -9.6_7_1_4], [-9.1_5_8_1, -9.5_7_2_0, -9.5_5_1_9], [-9.1_0_0_6, -9.6_4_5_8, -9.5_7_0_3]], [[-7.7_7_2_1, -7.3_7_1_6, -7.1_5_8_3], [-8.4_5_9_9, -8.0_6_2_4, -7.7_9_4_4], [-8.4_1_7_2, -7.8_3_6_6, -7.5_0_2_5]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": _UpperCamelCase : Optional[Any] = torch.tensor( [ [[6.9_8_1_1, 6.9_7_4_3, 7.3_1_2_3], [7.1_7_7_7, 7.1_9_3_1, 7.3_9_3_8], [7.5_6_3_3, 7.8_0_5_0, 7.8_9_0_1]], [[-1_0.5_5_3_6, -1_0.2_3_3_2, -1_0.2_9_2_4], [-1_0.2_3_3_6, -9.8_6_2_4, -9.5_9_6_4], [-1_0.8_8_4_0, -1_0.8_1_5_8, -1_0.6_6_5_9]], [[-3.4_9_3_8, -3.0_6_3_1, -2.8_6_2_0], [-3.4_2_0_5, -2.8_1_3_5, -2.6_8_7_5], [-3.4_1_7_9, -2.7_9_4_5, 
-2.8_7_5_0]], ] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) else: assert logits.shape == (1, 1_0_0_0) if mobilevit_name == "mobilevit_s": _UpperCamelCase : Tuple = torch.tensor([-0.9_8_6_6, 0.2_3_9_2, -1.1_2_4_1] ) elif mobilevit_name == "mobilevit_xs": _UpperCamelCase : Optional[Any] = torch.tensor([-2.4_7_6_1, -0.9_3_9_9, -1.9_5_8_7] ) elif mobilevit_name == "mobilevit_xxs": _UpperCamelCase : List[Any] = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ ) print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCAmelCase_ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCAmelCase_ ) if push_to_hub: _UpperCamelCase : Tuple = { 'mobilevit_s': 'mobilevit-small', 'mobilevit_xs': 'mobilevit-x-small', 'mobilevit_xxs': 'mobilevit-xx-small', 'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small', 'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small', 'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small', } print('Pushing to the hub...' ) _UpperCamelCase : int = model_mapping[mobilevit_name] image_processor.push_to_hub(UpperCAmelCase_ , organization='apple' ) model.push_to_hub(UpperCAmelCase_ , organization='apple' ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--mobilevit_name""", default="""mobilevit_s""", type=str, help=( """Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',""" """ 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'.""" ), ) parser.add_argument( """--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowerCAmelCase__ = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
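# What the _LazyModule indirection above buys: `import transformers` stays cheap
# because the torch-backed classes are only imported on first attribute access.
# Illustrative access pattern (assumes an installed transformers package; the
# deferred-import timing is the point being shown):
#
#   from transformers.models import canine  # no torch-heavy submodule loaded yet
#   model_cls = canine.CanineModel          # attribute access triggers the real import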
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ): _UpperCamelCase : List[Any] = parent _UpperCamelCase : Dict = batch_size _UpperCamelCase : Optional[int] = image_size _UpperCamelCase : str = num_channels _UpperCamelCase : Optional[Any] = embeddings_size _UpperCamelCase : Tuple = hidden_sizes _UpperCamelCase : Dict = depths _UpperCamelCase : str = is_training _UpperCamelCase : Optional[int] = use_labels _UpperCamelCase : str = hidden_act _UpperCamelCase : Optional[int] = num_labels _UpperCamelCase : Optional[int] = scope _UpperCamelCase : Tuple = len(__snake_case) _UpperCamelCase : Dict = out_features _UpperCamelCase : Union[str, Any] = out_indices _UpperCamelCase : int = num_groups def A__ ( self): _UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCamelCase : str = None if self.use_labels: _UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels) _UpperCamelCase : str = self.get_config() return config, pixel_values, labels def A__ ( self): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : str = BitModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[Any] = model(__snake_case) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Dict = self.num_labels _UpperCamelCase : Dict = BitForImageClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Dict = model(__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , 
[self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None _UpperCamelCase : Any = None _UpperCamelCase : str = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Any = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def A__ ( self): _UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs _UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def A__ ( self): _UpperCamelCase : Dict = BitModelTester(self) _UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case) def A__ ( self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self): return @unittest.skip(reason='Bit does not output attentions') def A__ ( self): pass @unittest.skip(reason='Bit does not use inputs_embeds') def A__ ( self): pass @unittest.skip(reason='Bit does not support input and output embeddings') def A__ ( self): pass def A__ ( self): _UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : int = model_class(__snake_case) _UpperCamelCase : List[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : Optional[int] = [*signature.parameters.keys()] _UpperCamelCase : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __snake_case) def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case) def A__ ( self): _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__snake_case) def A__ ( self): _UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : Union[str, Any] = model_class(config=__snake_case) for name, module in model.named_modules(): if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) 
self.assertTrue( torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def A__ ( self): def check_hidden_states_output(__snake_case , __snake_case , __snake_case): _UpperCamelCase : str = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): _UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case)) _UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase : str = self.model_tester.num_stages self.assertEqual(len(__snake_case) , expected_num_stages + 1) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : List[str] = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _UpperCamelCase : Any = layer_type _UpperCamelCase : Tuple = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase : List[str] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) @unittest.skip(reason='Bit does not use feedforward chunking') def A__ ( self): pass def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case) @slow def A__ ( self): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case) self.assertIsNotNone(__snake_case) def lowerCamelCase_ ( ) -> Optional[int]: '''simple docstring''' _UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def A__ ( self): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def A__ ( self): _UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case) _UpperCamelCase : str = self.default_image_processor _UpperCamelCase : List[str] = prepare_img() _UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case) # forward pass with torch.no_grad(): _UpperCamelCase : Any = model(**__snake_case) # verify the logits _UpperCamelCase : Dict = torch.Size((1, 10_00)) self.assertEqual(outputs.logits.shape , __snake_case) _UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4)) @require_torch class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def A__ ( self): _UpperCamelCase : List[str] = BitModelTester(self)
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] ) -> int: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCamelCase_ ( ) -> Optional[int]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def lowerCamelCase_ ( ) -> int: '''simple docstring''' _UpperCamelCase : Dict = 'mock-s3-bucket' _UpperCamelCase : List[Any] = F'''s3://{mock_bucket}''' _UpperCamelCase : Dict = extract_path_from_uri(UpperCAmelCase_ ) assert dataset_path.startswith('s3://' ) is False _UpperCamelCase : Optional[Any] = './local/path' _UpperCamelCase : List[str] = extract_path_from_uri(UpperCAmelCase_ ) assert dataset_path == new_dataset_path def lowerCamelCase_ ( UpperCAmelCase_ : Tuple ) -> Optional[int]: '''simple docstring''' _UpperCamelCase : Tuple = is_remote_filesystem(UpperCAmelCase_ ) assert is_remote is True _UpperCamelCase : Tuple = fsspec.filesystem('file' ) _UpperCamelCase : Tuple = is_remote_filesystem(UpperCAmelCase_ ) assert is_remote is False @pytest.mark.parametrize('compression_fs_class' , UpperCAmelCase_ ) def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] ) -> str: '''simple docstring''' _UpperCamelCase : Dict = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_file, 'bz2': bza_file, 'lz4': lza_file} _UpperCamelCase : int = input_paths[compression_fs_class.protocol] if input_path is None: _UpperCamelCase : List[str] = F'''for \'{compression_fs_class.protocol}\' compression protocol, ''' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(UpperCAmelCase_ ) _UpperCamelCase : Union[str, Any] = fsspec.filesystem(compression_fs_class.protocol , fo=UpperCAmelCase_ ) assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : List[str] = os.path.basename(UpperCAmelCase_ ) _UpperCamelCase : Union[str, Any] = expected_filename[: expected_filename.rindex('.' 
)] assert fs.glob('*' ) == [expected_filename] with fs.open(UpperCAmelCase_ , 'r' , encoding='utf-8' ) as f, open(UpperCAmelCase_ , encoding='utf-8' ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize('protocol' , ['zip', 'gzip'] ) def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase : int = {'zip': zip_jsonl_path, 'gzip': jsonl_gz_path} _UpperCamelCase : Optional[Any] = compressed_file_paths[protocol] _UpperCamelCase : Union[str, Any] = 'dataset.jsonl' _UpperCamelCase : Optional[int] = F'''{protocol}://{member_file_path}::{compressed_file_path}''' _UpperCamelCase , *_UpperCamelCase : str = fsspec.get_fs_token_paths(UpperCAmelCase_ ) assert fs.isfile(UpperCAmelCase_ ) assert not fs.isfile('non_existing_' + member_file_path ) @pytest.mark.integration def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] ) -> Tuple: '''simple docstring''' _UpperCamelCase : Optional[int] = hf_api.dataset_info(UpperCAmelCase_ , token=UpperCAmelCase_ ) _UpperCamelCase : Any = HfFileSystem(repo_info=UpperCAmelCase_ , token=UpperCAmelCase_ ) assert sorted(hffs.glob('*' ) ) == [".gitattributes", "data"] assert hffs.isdir('data' ) assert hffs.isfile('.gitattributes' ) and hffs.isfile('data/text_data.txt' ) with open(UpperCAmelCase_ ) as f: assert hffs.open('data/text_data.txt' , 'r' ).read() == f.read() def lowerCamelCase_ ( ) -> Dict: '''simple docstring''' _UpperCamelCase : List[Any] = 'bz2' # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(UpperCAmelCase_ , UpperCAmelCase_ , clobber=UpperCAmelCase_ ) with pytest.warns(UpperCAmelCase_ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(UpperCAmelCase_ ) == 1 assert ( str(warning_info[0].message ) == F'''A filesystem protocol was already set for {protocol} and will be overwritten.''' )
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch-snowflake construction rule `steps` times."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every line segment by four segments forming the Koch bump."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake outline with equal axis scaling."""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
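A minimal usage sketch for the de-mangled helpers above (names taken from the rewrite, not the obfuscated original): each iteration replaces every segment with four, so after n steps the outline has 3 * 4**n + 1 points.

# Sanity check for iterate()/iteration_step(): point count grows as 3 * 4**n + 1.
points = iterate(INITIAL_VECTORS, 3)
assert len(points) == 3 * 4**3 + 1  # 193 points after three iterations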
import string


def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
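A quick sanity check for the brute-force helper above, assuming the de-mangled decrypt() name from the rewrite:

# 'URYYB' is 'HELLO' shifted by 13 (ROT13), so the printed line for
# Key #13 reads 'HELLO'; the other 25 candidates are noise.
decrypt("URYYB")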
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # pytest exits with code 5 when no tests are collected; treat that as success
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
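For context, a sketch of how the IGNORE_RESULT doctest flag registered above would be consumed (a hypothetical doctest, not part of the original file): appending the flag to a doctest directive makes the custom checker accept any output.

def device_count():
    """
    >>> device_count()  # doctest: +IGNORE_RESULT
    0
    """
    import os

    # The actual return value varies per machine, which is exactly why the
    # doctest above opts out of output comparison via IGNORE_RESULT.
    return os.cpu_count()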
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class lowercase ( _lowercase ): """simple docstring""" a__ = "bart" a__ = ["past_key_values"] a__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self , __snake_case=5_02_65 , __snake_case=10_24 , __snake_case=12 , __snake_case=40_96 , __snake_case=16 , __snake_case=12 , __snake_case=40_96 , __snake_case=16 , __snake_case=0.0 , __snake_case=0.0 , __snake_case="gelu" , __snake_case=10_24 , __snake_case=0.1 , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=0.0 , __snake_case=False , __snake_case=True , __snake_case=3 , __snake_case=1 , __snake_case=0 , __snake_case=2 , __snake_case=True , __snake_case=2 , __snake_case=2 , **__snake_case , ): _UpperCamelCase : Any = vocab_size _UpperCamelCase : Optional[Any] = max_position_embeddings _UpperCamelCase : Tuple = d_model _UpperCamelCase : Optional[int] = encoder_ffn_dim _UpperCamelCase : Union[str, Any] = encoder_layers _UpperCamelCase : Tuple = encoder_attention_heads _UpperCamelCase : List[str] = decoder_ffn_dim _UpperCamelCase : Union[str, Any] = decoder_layers _UpperCamelCase : List[Any] = decoder_attention_heads _UpperCamelCase : Any = dropout _UpperCamelCase : int = attention_dropout _UpperCamelCase : List[Any] = activation_dropout _UpperCamelCase : List[str] = activation_function _UpperCamelCase : Tuple = init_std _UpperCamelCase : str = encoder_layerdrop _UpperCamelCase : Optional[int] = decoder_layerdrop _UpperCamelCase : Optional[int] = classifier_dropout _UpperCamelCase : Union[str, Any] = use_cache _UpperCamelCase : Dict = encoder_layers _UpperCamelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__snake_case , pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , is_encoder_decoder=__snake_case , decoder_start_token_id=__snake_case , forced_eos_token_id=__snake_case , **__snake_case , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , __snake_case): _UpperCamelCase : int = self.bos_token_id warnings.warn( f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
''' 'The config can simply be saved and uploaded again to be fixed.') class lowercase ( _lowercase ): """simple docstring""" @property def A__ ( self): if self.task in ["default", "seq2seq-lm"]: _UpperCamelCase : int = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ]) if self.use_past: _UpperCamelCase : str = {0: 'batch'} _UpperCamelCase : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: _UpperCamelCase : Tuple = {0: 'batch', 1: 'decoder_sequence'} _UpperCamelCase : List[Any] = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__snake_case , direction='inputs') elif self.task == "causal-lm": # TODO: figure this case out. _UpperCamelCase : Optional[int] = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ]) if self.use_past: _UpperCamelCase , _UpperCamelCase : Tuple = self.num_layers for i in range(__snake_case): _UpperCamelCase : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'} _UpperCamelCase : List[str] = {0: 'batch', 2: 'past_sequence + sequence'} else: _UpperCamelCase : Any = OrderedDict( [ ('input_ids', {0: 'batch', 1: 'encoder_sequence'}), ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}), ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}), ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}), ]) return common_inputs @property def A__ ( self): if self.task in ["default", "seq2seq-lm"]: _UpperCamelCase : Optional[int] = super().outputs else: _UpperCamelCase : Any = super(__snake_case , self).outputs if self.use_past: _UpperCamelCase , _UpperCamelCase : List[Any] = self.num_layers for i in range(__snake_case): _UpperCamelCase : List[str] = {0: 'batch', 2: 'past_sequence + sequence'} _UpperCamelCase : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'} return common_outputs def A__ ( self , __snake_case , __snake_case = -1 , __snake_case = -1 , __snake_case = False , __snake_case = None , ): _UpperCamelCase : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case) # Generate decoder inputs _UpperCamelCase : Union[str, Any] = seq_length if not self.use_past else 1 _UpperCamelCase : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case) _UpperCamelCase : List[str] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} _UpperCamelCase : Union[str, Any] = dict(**__snake_case , **__snake_case) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch _UpperCamelCase , _UpperCamelCase : Tuple = common_inputs['input_ids'].shape _UpperCamelCase : Optional[int] = common_inputs['decoder_input_ids'].shape[1] _UpperCamelCase , _UpperCamelCase : List[Any] = self.num_attention_heads _UpperCamelCase : Union[str, Any] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _UpperCamelCase : List[Any] = decoder_seq_length + 3 _UpperCamelCase : Optional[int] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _UpperCamelCase : Dict = torch.cat( [common_inputs['decoder_attention_mask'], 
torch.ones(__snake_case , __snake_case)] , dim=1) _UpperCamelCase : int = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered _UpperCamelCase , _UpperCamelCase : Any = self.num_layers _UpperCamelCase : int = min(__snake_case , __snake_case) _UpperCamelCase : Optional[int] = max(__snake_case , __snake_case) - min_num_layers _UpperCamelCase : int = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder' for _ in range(__snake_case): common_inputs["past_key_values"].append( ( torch.zeros(__snake_case), torch.zeros(__snake_case), torch.zeros(__snake_case), torch.zeros(__snake_case), )) # TODO: test this. _UpperCamelCase : List[Any] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape for _ in range(__snake_case , __snake_case): common_inputs["past_key_values"].append((torch.zeros(__snake_case), torch.zeros(__snake_case))) return common_inputs def A__ ( self , __snake_case , __snake_case = -1 , __snake_case = -1 , __snake_case = False , __snake_case = None , ): _UpperCamelCase : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case) if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch _UpperCamelCase , _UpperCamelCase : Dict = common_inputs['input_ids'].shape # Not using the same length for past_key_values _UpperCamelCase : Tuple = seqlen + 2 _UpperCamelCase , _UpperCamelCase : Tuple = self.num_layers _UpperCamelCase , _UpperCamelCase : str = self.num_attention_heads _UpperCamelCase : Tuple = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _UpperCamelCase : Optional[int] = common_inputs['attention_mask'].dtype _UpperCamelCase : Optional[Any] = torch.cat( [common_inputs['attention_mask'], torch.ones(__snake_case , __snake_case , dtype=__snake_case)] , dim=1) _UpperCamelCase : List[Any] = [ (torch.zeros(__snake_case), torch.zeros(__snake_case)) for _ in range(__snake_case) ] return common_inputs def A__ ( self , __snake_case , __snake_case = -1 , __snake_case = -1 , __snake_case = False , __snake_case = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX _UpperCamelCase : int = compute_effective_axis_dimension( __snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCamelCase : Optional[int] = tokenizer.num_special_tokens_to_add(__snake_case) _UpperCamelCase : Tuple = compute_effective_axis_dimension( __snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__snake_case) # Generate dummy inputs according to compute batch and sequence _UpperCamelCase : Union[str, Any] = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size _UpperCamelCase : Any = dict(tokenizer(__snake_case , return_tensors=__snake_case)) return common_inputs def A__ ( self , __snake_case , __snake_case = -1 , __snake_case = -1 , __snake_case = False , __snake_case = None , ): if self.task in ["default", "seq2seq-lm"]: _UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case) elif self.task == "causal-lm": _UpperCamelCase : str = self._generate_dummy_inputs_for_causal_lm( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case) else: _UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __snake_case , batch_size=__snake_case , seq_length=__snake_case , is_pair=__snake_case , framework=__snake_case) return common_inputs def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case): if self.task in ["default", "seq2seq-lm"]: _UpperCamelCase : Dict = super()._flatten_past_key_values_(__snake_case , __snake_case , __snake_case , __snake_case) else: _UpperCamelCase : int = super(__snake_case , self)._flatten_past_key_values_( __snake_case , __snake_case , __snake_case , __snake_case)
lowerCAmelCase__ = range(2, 2_0 + 1) lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)] lowerCAmelCase__ = {} def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple: '''simple docstring''' _UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ) _UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) ) _UpperCamelCase , _UpperCamelCase : Dict = 0, 0 _UpperCamelCase : Optional[int] = n - i _UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ ) if sub_memo is not None: _UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ ) if jumps is not None and len(UpperCAmelCase_ ) > 0: # find and make the largest jump without going over _UpperCamelCase : str = -1 for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: _UpperCamelCase : Optional[Any] = _k break if max_jump >= 0: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = jumps[max_jump] # since the difference between jumps is cached, add c _UpperCamelCase : Tuple = diff + c for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ): _UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 ) if new_c > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: _UpperCamelCase : Union[str, Any] = [] else: _UpperCamelCase : List[Any] = {c: []} _UpperCamelCase : Optional[int] = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps _UpperCamelCase , _UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead _UpperCamelCase , _UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped _UpperCamelCase : List[str] = sub_memo[c] # keep jumps sorted by # of terms skipped _UpperCamelCase : Union[str, Any] = 0 while j < len(UpperCAmelCase_ ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) ) return (diff, dn) def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict: '''simple docstring''' if i >= n: return 0, i if k > len(UpperCAmelCase_ ): a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) _UpperCamelCase : Any = i _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = 0, 0, 0 for j in range(len(UpperCAmelCase_ ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 _UpperCamelCase : Union[str, Any] = ds_c + ds_b diff += addend _UpperCamelCase : Union[str, Any] = 0 for j in range(UpperCAmelCase_ ): _UpperCamelCase : Union[str, Any] = a_i[j] + addend _UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return diff, i - start_i def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict: '''simple docstring''' for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ): _UpperCamelCase : List[str] = digits[j] + 
addend if s >= 1_0: _UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 ) _UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient else: _UpperCamelCase : Dict = s _UpperCamelCase : Optional[Any] = addend // 1_0 if addend == 0: break while addend > 0: _UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 ) digits.append(UpperCAmelCase_ ) def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int: '''simple docstring''' _UpperCamelCase : Optional[Any] = [1] _UpperCamelCase : Optional[int] = 1 _UpperCamelCase : int = 0 while True: _UpperCamelCase , _UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ ) dn += terms_jumped if dn == n - i: break _UpperCamelCase : str = 0 for j in range(len(UpperCAmelCase_ ) ): a_n += digits[j] * 1_0**j return a_n if __name__ == "__main__": print(f'{solution() = }')
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
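A short usage sketch for the restored config class (the ViTMAEConfig name is assumed from the rewrite above): defaults mirror facebook/vit-mae-base, and any field can be overridden by keyword.

# Instantiate with one field overridden; everything else keeps its default.
config = ViTMAEConfig(mask_ratio=0.6)  # override the default 0.75 masking ratio
assert config.hidden_size == 768 and config.decoder_hidden_size == 512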
import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration lowerCAmelCase__ = { """tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""", """tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""", """base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""", """base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""", """small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""", """small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""", """medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""", """medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""", """large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""", """large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""", } def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase : int = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCAmelCase__ = { """blocks""": """layers""", """mlp.0""": """fc1""", """mlp.2""": """fc2""", """mlp_ln""": """final_layer_norm""", """.attn.query""": """.self_attn.q_proj""", """.attn.key""": """.self_attn.k_proj""", """.attn.value""": """.self_attn.v_proj""", """.attn_ln""": """.self_attn_layer_norm""", """.attn.out""": """.self_attn.out_proj""", """.cross_attn.query""": """.encoder_attn.q_proj""", """.cross_attn.key""": """.encoder_attn.k_proj""", """.cross_attn.value""": """.encoder_attn.v_proj""", """.cross_attn_ln""": """.encoder_attn_layer_norm""", """.cross_attn.out""": """.encoder_attn.out_proj""", """decoder.ln.""": """decoder.layer_norm.""", """encoder.ln.""": """encoder.layer_norm.""", """token_embedding""": """embed_tokens""", """encoder.positional_embedding""": """encoder.embed_positions.weight""", """decoder.positional_embedding""": """decoder.embed_positions.weight""", """ln_post""": """layer_norm""", } def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> List[Any]: '''simple docstring''' _UpperCamelCase : str = list(s_dict.keys() ) for key in keys: _UpperCamelCase : Tuple = key for k, v in WHISPER_MAPPING.items(): if k in key: _UpperCamelCase : List[str] = new_key.replace(UpperCAmelCase_ , UpperCAmelCase_ ) print(F'''{key} -> {new_key}''' ) _UpperCamelCase : int = s_dict.pop(UpperCAmelCase_ ) return s_dict def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> List[str]: '''simple docstring''' _UpperCamelCase , _UpperCamelCase : Union[str, Any] = emb.weight.shape _UpperCamelCase : Tuple = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_ ) _UpperCamelCase 
: Any = emb.weight.data return lin_layer def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str ) -> bytes: '''simple docstring''' os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) _UpperCamelCase : Optional[int] = os.path.basename(UpperCAmelCase_ ) _UpperCamelCase : Tuple = url.split('/' )[-2] _UpperCamelCase : Any = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) if os.path.exists(UpperCAmelCase_ ) and not os.path.isfile(UpperCAmelCase_ ): raise RuntimeError(F'''{download_target} exists and is not a regular file''' ) if os.path.isfile(UpperCAmelCase_ ): _UpperCamelCase : List[Any] = open(UpperCAmelCase_ , 'rb' ).read() if hashlib.shaaaa(UpperCAmelCase_ ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F'''{download_target} exists, but the SHA256 checksum does not match; re-downloading the file''' ) with urllib.request.urlopen(UpperCAmelCase_ ) as source, open(UpperCAmelCase_ , 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ) , ncols=8_0 , unit='iB' , unit_scale=UpperCAmelCase_ , unit_divisor=1_0_2_4 ) as loop: while True: _UpperCamelCase : Optional[Any] = source.read(8_1_9_2 ) if not buffer: break output.write(UpperCAmelCase_ ) loop.update(len(UpperCAmelCase_ ) ) _UpperCamelCase : Union[str, Any] = open(UpperCAmelCase_ , 'rb' ).read() if hashlib.shaaaa(UpperCAmelCase_ ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] ) -> Optional[Any]: '''simple docstring''' if ".pt" not in checkpoint_path: _UpperCamelCase : List[Any] = _download(_MODELS[checkpoint_path] ) else: _UpperCamelCase : Dict = torch.load(UpperCAmelCase_ , map_location='cpu' ) _UpperCamelCase : str = original_checkpoint['dims'] _UpperCamelCase : Dict = original_checkpoint['model_state_dict'] _UpperCamelCase : Optional[int] = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(UpperCAmelCase_ ) rename_keys(UpperCAmelCase_ ) _UpperCamelCase : Dict = True _UpperCamelCase : str = state_dict['decoder.layers.0.fc1.weight'].shape[0] _UpperCamelCase : Any = WhisperConfig( vocab_size=dimensions['n_vocab'] , encoder_ffn_dim=UpperCAmelCase_ , decoder_ffn_dim=UpperCAmelCase_ , num_mel_bins=dimensions['n_mels'] , d_model=dimensions['n_audio_state'] , max_target_positions=dimensions['n_text_ctx'] , encoder_layers=dimensions['n_audio_layer'] , encoder_attention_heads=dimensions['n_audio_head'] , decoder_layers=dimensions['n_text_layer'] , decoder_attention_heads=dimensions['n_text_state'] , max_source_positions=dimensions['n_audio_ctx'] , ) _UpperCamelCase : Any = WhisperForConditionalGeneration(UpperCAmelCase_ ) _UpperCamelCase , _UpperCamelCase : Optional[Any] = model.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0 and not set(UpperCAmelCase_ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F''' but all the following weights are missing {missing}''' ) if tie_embeds: _UpperCamelCase : str = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCamelCase : str = proj_out_weights model.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # # Required parameters 
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") lowerCAmelCase__ = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to travel on the given days using 1-, 7-, and 30-day passes."""
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
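A usage sketch for mincost_tickets() above (function name assumed from the rewrite): with the classic minimum-cost-tickets example, day passes on days 1 and 20 plus a 7-day pass covering days 4 through 8 cost 2 + 7 + 2 = 11.

assert mincost_tickets(days=[1, 4, 6, 7, 8, 20], costs=[2, 7, 15]) == 11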
import math


def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Columnar transposition: column `col` collects every key-th character."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert encrypt_message by refilling the transposition grid."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0

    for symbol in message:
        plain_text[col] += symbol
        col += 1

        if (col == num_cols) or (col == num_cols - 1) and (row >= num_rows - num_shaded_boxes):
            col = 0
            row += 1

    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
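A round-trip sanity check for the cipher pair above (function names assumed from the rewrite): decrypting an encryption with the same key must return the original message.

message = "Common sense is not so common."
assert decrypt_message(8, encrypt_message(8, message)) == message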
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ): _UpperCamelCase : List[Any] = parent _UpperCamelCase : Optional[Any] = batch_size _UpperCamelCase : int = seq_length _UpperCamelCase : str = is_training _UpperCamelCase : Tuple = use_input_mask _UpperCamelCase : Union[str, Any] = use_token_type_ids _UpperCamelCase : Union[str, Any] = use_labels _UpperCamelCase : Optional[Any] = vocab_size _UpperCamelCase : List[Any] = hidden_size _UpperCamelCase : Optional[Any] = embedding_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : str = num_attention_heads _UpperCamelCase : int = intermediate_size _UpperCamelCase : int = hidden_act _UpperCamelCase : Tuple = hidden_dropout_prob _UpperCamelCase : int = attention_probs_dropout_prob _UpperCamelCase : Tuple = max_position_embeddings _UpperCamelCase : List[str] = type_vocab_size _UpperCamelCase : Dict = type_sequence_label_size _UpperCamelCase : List[str] = initializer_range _UpperCamelCase : Optional[Any] = num_labels _UpperCamelCase : Tuple = num_choices _UpperCamelCase : List[str] = scope def A__ ( self): _UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase : Any = None if self.use_input_mask: _UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase : Optional[Any] = None if self.use_token_type_ids: _UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase : int = None _UpperCamelCase : List[str] = None _UpperCamelCase : Dict = None if self.use_labels: _UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , ) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case) _UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case) _UpperCamelCase : Optional[Any] = model(__snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[str] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def A__ ( self , 
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[int] = self.num_labels _UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Any = self.num_labels _UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : List[str] = self.num_choices _UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : Union[str, Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def A__ ( self): _UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) : Optional[int] = config_and_inputs _UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) a__ = True # test_resize_embeddings = False a__ = False def A__ ( self , __snake_case , __snake_case , __snake_case=False): _UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case) if return_labels: if model_class in get_values(__snake_case): _UpperCamelCase : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case) _UpperCamelCase : str = 
torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__snake_case) return inputs_dict def A__ ( self): _UpperCamelCase : Any = MegatronBertModelTester(self) _UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37) def A__ ( self): self.config_tester.run_common_tests() def A__ ( self): _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__snake_case) def A__ ( self): _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case) def A__ ( self): _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case) def A__ ( self): _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case) def A__ ( self): _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case) def A__ ( self): _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case) def A__ ( self): _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case) def A__ ( self): _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case) def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]: '''simple docstring''' return torch.tensor( UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , ) lowerCAmelCase__ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip('Model is not available.') def A__ ( self): _UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m' if "MYDIR" in os.environ: _UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case) _UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case) model.to(__snake_case) model.half() _UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]]) with torch.no_grad(): _UpperCamelCase : str = model(__snake_case)[0] _UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24)) self.assertEqual(output.shape , __snake_case) _UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8] for ii in range(3): for jj in range(3): _UpperCamelCase : Optional[Any] = output[0, ii, jj] _UpperCamelCase : Dict = expected[3 * ii + jj] _UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case) self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
from ...configuration_utils import PretrainedConfig
from ...utils import logging

lowerCAmelCase__ = logging.get_logger(__name__)

lowerCAmelCase__ = {
    """google/realm-cc-news-pretrained-embedder""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-encoder""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-scorer""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
    ),
    """google/realm-cc-news-pretrained-openqa""": (
        """https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json"""
    ),
    """google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
    """google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
    """google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
    """google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
    # See all REALM models at https://huggingface.co/models?filter=realm
}


class lowercase ( _lowercase ):
    """simple docstring"""

    a__ = "realm"

    def __init__(
        self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=1_28 , __snake_case=12 , __snake_case=12 ,
        __snake_case=8 , __snake_case=30_72 , __snake_case="gelu_new" , __snake_case=0.1 , __snake_case=0.1 ,
        __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=2_56 ,
        __snake_case=10 , __snake_case=1e-3 , __snake_case=5 , __snake_case=3_20 , __snake_case=13_35_37_18 ,
        __snake_case=50_00 , __snake_case=1 , __snake_case=0 , __snake_case=2 , **__snake_case ,
    ):
        super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)

        # Common config
        _UpperCamelCase : Optional[Any] = vocab_size
        _UpperCamelCase : Optional[int] = max_position_embeddings
        _UpperCamelCase : Optional[Any] = hidden_size
        _UpperCamelCase : List[str] = retriever_proj_size
        _UpperCamelCase : Any = num_hidden_layers
        _UpperCamelCase : Dict = num_attention_heads
        _UpperCamelCase : List[Any] = num_candidates
        _UpperCamelCase : List[Any] = intermediate_size
        _UpperCamelCase : int = hidden_act
        _UpperCamelCase : Dict = hidden_dropout_prob
        _UpperCamelCase : int = attention_probs_dropout_prob
        _UpperCamelCase : Tuple = initializer_range
        _UpperCamelCase : List[str] = type_vocab_size
        _UpperCamelCase : int = layer_norm_eps

        # Reader config
        _UpperCamelCase : Tuple = span_hidden_size
        _UpperCamelCase : Dict = max_span_width
        _UpperCamelCase : Dict = reader_layer_norm_eps
        _UpperCamelCase : Union[str, Any] = reader_beam_size
        _UpperCamelCase : Optional[int] = reader_seq_len

        # Retrieval config
        _UpperCamelCase : Tuple = num_block_records
        _UpperCamelCase : Tuple = searcher_beam_size
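
# A minimal usage sketch for the config defined above. It assumes the class
# is exported as `RealmConfig` from `transformers`; the keyword names follow
# the __init__ assignments above, and the values are illustrative rather
# than recommended.
from transformers import RealmConfig

realm_config = RealmConfig(num_candidates=2 , searcher_beam_size=10 , reader_beam_size=4)
print(realm_config.hidden_size)  # 768, the BERT-base sized default above
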
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = """▁""" lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""} lowerCAmelCase__ = { """vocab_file""": { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model""" ), } } lowerCAmelCase__ = { """xlm-roberta-base""": 5_1_2, """xlm-roberta-large""": 5_1_2, """xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2, """xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2, """xlm-roberta-large-finetuned-conll03-english""": 5_1_2, """xlm-roberta-large-finetuned-conll03-german""": 5_1_2, } class lowercase ( _lowercase ): """simple docstring""" a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ["input_ids", "attention_mask"] def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ): # Mask token behave like a normal word, i.e. include the space before it _UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token _UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , ) _UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(__snake_case)) _UpperCamelCase : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCamelCase : List[Any] = 1 _UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset _UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self): _UpperCamelCase : List[Any] = self.__dict__.copy() _UpperCamelCase : Optional[Any] = None _UpperCamelCase : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self , __snake_case): _UpperCamelCase : int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): _UpperCamelCase : Tuple = {} _UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def A__ ( self , __snake_case , __snake_case = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase : Tuple = [self.cls_token_id] _UpperCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self , __snake_case , __snake_case = None , __snake_case = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case) if token_ids_a is None: return [1] + ([0] * len(__snake_case)) + [1] return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1] def A__ ( self , __snake_case , __snake_case = None): _UpperCamelCase : Optional[Any] = [self.sep_token_id] _UpperCamelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def A__ ( self): return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token def A__ ( self): _UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def A__ ( self , __snake_case): return self.sp_model.encode(__snake_case , out_type=__snake_case) def A__ ( self , __snake_case): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCamelCase : str = self.sp_model.PieceToId(__snake_case) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A__ ( self , __snake_case): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def A__ ( self , __snake_case): _UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(__snake_case , ' ').strip() return out_string def A__ ( self , __snake_case , __snake_case = None): if not os.path.isdir(__snake_case): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return _UpperCamelCase : str = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __snake_case) elif not os.path.isfile(self.vocab_file): with open(__snake_case , 'wb') as fi: _UpperCamelCase : Any = self.sp_model.serialized_model_proto() fi.write(__snake_case) 
return (out_vocab_file,)
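
# A self-contained restatement of the fairseq/SentencePiece alignment the
# tokenizer above implements: the first four ids are pinned to fairseq's
# special tokens, and every other SentencePiece id is shifted by a fixed
# offset of 1. Sketch only; not the tokenizer itself.
fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
fairseq_offset = 1  # spm id 3 (",") becomes fairseq id 4, and so on

def spm_id_to_fairseq_id(spm_id):
    # spm reserves id 0 for its own <unk>, so 0 falls back to the fairseq unk id
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids['<unk>']

assert spm_id_to_fairseq_id(3) == 4  # "," per the alignment table above
assert spm_id_to_fairseq_id(0) == 3  # unknown pieces map to <unk>
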
import json import logging import os import sys from pathlib import Path import finetune_rag from transformers.file_utils import is_apex_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, require_ray, require_torch_gpu, require_torch_multi_gpu, ) logging.basicConfig(level=logging.DEBUG) lowerCAmelCase__ = logging.getLogger() lowerCAmelCase__ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class lowercase ( _lowercase ): """simple docstring""" def A__ ( self , __snake_case): os.makedirs(__snake_case , exist_ok=__snake_case) _UpperCamelCase : Dict = {'source': 'What is love ?', 'target': 'life'} _UpperCamelCase : List[str] = {'train': 12, 'val': 2, 'test': 2} for split in ["train", "test", "val"]: for field in ["source", "target"]: _UpperCamelCase : str = '\n'.join([contents[field]] * n_lines[split]) with open(os.path.join(__snake_case , f'''{split}.{field}''') , 'w') as f: f.write(__snake_case) def A__ ( self , __snake_case , __snake_case = "pytorch"): _UpperCamelCase : Optional[Any] = self.get_auto_remove_tmp_dir() _UpperCamelCase : int = os.path.join(__snake_case , 'output') _UpperCamelCase : Optional[Any] = os.path.join(__snake_case , 'data') self._create_dummy_data(data_dir=__snake_case) _UpperCamelCase : Optional[Any] = f''' --data_dir {data_dir} \ --output_dir {output_dir} \ --model_name_or_path facebook/rag-sequence-base \ --model_type rag_sequence \ --do_train \ --do_predict \ --n_val -1 \ --val_check_interval 1.0 \ --train_batch_size 2 \ --eval_batch_size 1 \ --max_source_length 25 \ --max_target_length 25 \ --val_max_target_length 25 \ --test_max_target_length 25 \ --label_smoothing 0.1 \ --dropout 0.1 \ --attention_dropout 0.1 \ --weight_decay 0.001 \ --adam_epsilon 1e-08 \ --max_grad_norm 0.1 \ --lr_scheduler polynomial \ --learning_rate 3e-04 \ --num_train_epochs 1 \ --warmup_steps 4 \ --gradient_accumulation_steps 1 \ --distributed-port 8787 \ --use_dummy_dataset 1 \ --distributed_retriever {distributed_retriever} \ '''.split() if gpus > 0: testargs.append(f'''--gpus={gpus}''') if is_apex_available(): testargs.append('--fp16') else: testargs.append('--gpus=0') testargs.append('--distributed_backend=ddp_cpu') testargs.append('--num_processes=2') _UpperCamelCase : List[str] = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs execute_subprocess_async(__snake_case , env=self.get_env()) _UpperCamelCase : Dict = os.path.join(__snake_case , 'metrics.json') with open(__snake_case) as f: _UpperCamelCase : Optional[Any] = json.load(__snake_case) return result @require_torch_gpu def A__ ( self): _UpperCamelCase : Dict = self._run_finetune(gpus=1) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2) @require_torch_multi_gpu def A__ ( self): _UpperCamelCase : str = self._run_finetune(gpus=2) self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2) @require_torch_gpu @require_ray def A__ ( self): _UpperCamelCase : List[str] = self._run_finetune(gpus=1 , distributed_retriever='ray') self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2) @require_torch_multi_gpu @require_ray def A__ ( self): _UpperCamelCase : Any = self._run_finetune(gpus=1 , distributed_retriever='ray') self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2)
from ...processing_utils import ProcessorMixin class lowercase ( _lowercase ): """simple docstring""" a__ = ["image_processor", "feature_extractor"] a__ = "TvltImageProcessor" a__ = "TvltFeatureExtractor" def __init__( self , __snake_case , __snake_case): super().__init__(image_processor=__snake_case , feature_extractor=__snake_case) _UpperCamelCase : List[str] = image_processor _UpperCamelCase : Dict = feature_extractor def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ): if images is None and audio is None: raise ValueError('You need to specify either an `images` or `audio` input to process.') _UpperCamelCase : Union[str, Any] = None if images is not None: _UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case) if images_mixed is not None: _UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case) if audio is not None: _UpperCamelCase : Tuple = self.feature_extractor( __snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case) _UpperCamelCase : Tuple = {} if audio is not None: output_dict.update(__snake_case) if images is not None: output_dict.update(__snake_case) if images_mixed_dict is not None: output_dict.update(__snake_case) return output_dict @property def A__ ( self): _UpperCamelCase : List[Any] = self.image_processor.model_input_names _UpperCamelCase : List[Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
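
# A hedged usage sketch for the processor above. It assumes `TvltProcessor`,
# `TvltImageProcessor` and `TvltFeatureExtractor` are importable from
# `transformers` with default constructors; the audio array and the
# 44.1 kHz sampling rate are made-up example inputs.
import numpy as np
from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor

processor = TvltProcessor(TvltImageProcessor() , TvltFeatureExtractor())
audio = np.random.randn(44_100).astype(np.float32)  # one second of fake audio
inputs = processor(audio=audio , sampling_rate=44_100)
print(list(inputs.keys()))  # the merged feature-extractor output dict
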
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class lowercase ( unittest.TestCase ): """simple docstring""" a__ = inspect.getfile(accelerate.test_utils ) a__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] ) a__ = ["accelerate", "launch"] a__ = Path.home() / ".cache/huggingface/accelerate" a__ = "default_config.yaml" a__ = config_folder / config_file a__ = config_folder / "_default_config.yaml" a__ = Path("tests/test_configs" ) @classmethod def A__ ( cls): if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path) @classmethod def A__ ( cls): if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path) def A__ ( self): _UpperCamelCase : Dict = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy()) def A__ ( self): for config in sorted(self.test_config_path.glob('**/*.yaml')): with self.subTest(config_file=__snake_case): execute_subprocess_async( self.base_cmd + ['--config_file', str(__snake_case), self.test_file_path] , env=os.environ.copy()) def A__ ( self): execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy()) class lowercase ( unittest.TestCase ): """simple docstring""" a__ = "test-tpu" a__ = "us-central1-a" a__ = "ls" a__ = ["accelerate", "tpu-config"] a__ = "cd /usr/share" a__ = "tests/test_samples/test_command_file.sh" a__ = "Running gcloud compute tpus tpu-vm ssh" def A__ ( self): _UpperCamelCase : List[str] = run_command( self.cmd + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=__snake_case , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , ) def A__ ( self): _UpperCamelCase : Any = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=__snake_case , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , ) def A__ ( self): _UpperCamelCase : Tuple = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=__snake_case) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , ) def A__ ( self): _UpperCamelCase : List[str] = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=__snake_case , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , __snake_case , ) def A__ ( self): _UpperCamelCase : Tuple = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--command', 'echo "Hello World"', '--debug', ] , return_stdout=__snake_case , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , __snake_case , ) def A__ ( self): _UpperCamelCase : Optional[int] = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', 
'--command_file', self.command_file, '--debug'] , return_stdout=__snake_case , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , ) def A__ ( self): _UpperCamelCase : Optional[Any] = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command_file', self.command_file, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=__snake_case , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , ) def A__ ( self): _UpperCamelCase : Optional[Any] = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=__snake_case , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , ) def A__ ( self): _UpperCamelCase : Any = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--accelerate_version', '12.0.0', '--debug', ] , return_stdout=__snake_case , ) self.assertIn( f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , __snake_case , )
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowercase ( _lowercase ): """simple docstring""" a__ = "rwkv" a__ = {"max_position_embeddings": "context_length"} def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ): _UpperCamelCase : str = vocab_size _UpperCamelCase : int = context_length _UpperCamelCase : Tuple = hidden_size _UpperCamelCase : Tuple = num_hidden_layers _UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size _UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size _UpperCamelCase : Union[str, Any] = layer_norm_epsilon _UpperCamelCase : Dict = rescale_every _UpperCamelCase : Optional[Any] = use_cache _UpperCamelCase : str = bos_token_id _UpperCamelCase : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
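
# A small sketch of the defaulting behaviour visible above: when
# `attention_hidden_size` or `intermediate_size` is left as None, both are
# derived from `hidden_size`. Assumes the class is exported as `RwkvConfig`
# from `transformers`.
from transformers import RwkvConfig

rwkv_config = RwkvConfig(hidden_size=20_48)
assert rwkv_config.attention_hidden_size == 20_48  # falls back to hidden_size
assert rwkv_config.intermediate_size == 4 * 20_48  # defaults to 4 * hidden_size
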
lowerCAmelCase__ = [ (1_0_0_0, """M"""), (9_0_0, """CM"""), (5_0_0, """D"""), (4_0_0, """CD"""), (1_0_0, """C"""), (9_0, """XC"""), (5_0, """L"""), (4_0, """XL"""), (1_0, """X"""), (9, """IX"""), (5, """V"""), (4, """IV"""), (1, """I"""), ] def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> int: '''simple docstring''' _UpperCamelCase : List[Any] = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0} _UpperCamelCase : Union[str, Any] = 0 _UpperCamelCase : Any = 0 while place < len(UpperCAmelCase_ ): if (place + 1 < len(UpperCAmelCase_ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> str: '''simple docstring''' _UpperCamelCase : Dict = [] for arabic, roman in ROMAN: ((_UpperCamelCase) , (_UpperCamelCase)) : Optional[Any] = divmod(UpperCAmelCase_ , UpperCAmelCase_ ) result.append(roman * factor ) if number == 0: break return "".join(UpperCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
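
# A worked example of the greedy conversion implemented above, restated with
# descriptive names: repeatedly take the largest (value, numeral) pair that
# still fits, exactly as the divmod loop does.
ROMAN_PAIRS = [(1_000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'), (100, 'C'),
               (90, 'XC'), (50, 'L'), (40, 'XL'), (10, 'X'), (9, 'IX'),
               (5, 'V'), (4, 'IV'), (1, 'I')]

def int_to_roman_example(number):
    result = []
    for arabic, roman in ROMAN_PAIRS:
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return ''.join(result)

assert int_to_roman_example(1_994) == 'MCMXCIV'  # 1000 + 900 + 90 + 4
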
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""", """bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""", """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""", """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""", """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""", """bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""", """cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""", """cl-tohoku/bert-base-japanese-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json""" ), """wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""", # See all BERT models at https://huggingface.co/models?filter=bert } class lowercase ( _lowercase ): """simple docstring""" a__ = "bert" def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , 
**__snake_case , ): super().__init__(pad_token_id=__snake_case , **__snake_case) _UpperCamelCase : int = vocab_size _UpperCamelCase : Optional[Any] = hidden_size _UpperCamelCase : Optional[Any] = num_hidden_layers _UpperCamelCase : List[str] = num_attention_heads _UpperCamelCase : int = hidden_act _UpperCamelCase : Optional[Any] = intermediate_size _UpperCamelCase : Union[str, Any] = hidden_dropout_prob _UpperCamelCase : Tuple = attention_probs_dropout_prob _UpperCamelCase : Optional[int] = max_position_embeddings _UpperCamelCase : str = type_vocab_size _UpperCamelCase : Optional[Any] = initializer_range _UpperCamelCase : List[str] = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Any = use_cache _UpperCamelCase : Any = classifier_dropout class lowercase ( _lowercase ): """simple docstring""" @property def A__ ( self): if self.task == "multiple-choice": _UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ])
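
# A hedged sketch of the dynamic-axes logic in the ONNX config above: for
# multiple-choice tasks a `choice` axis is inserted between batch and
# sequence. The public name `BertOnnxConfig` and the `task` keyword are
# assumptions about the surrounding library.
from transformers import BertConfig
from transformers.models.bert.configuration_bert import BertOnnxConfig

onnx_config = BertOnnxConfig(BertConfig() , task='multiple-choice')
print(onnx_config.inputs['input_ids'])  # {0: 'batch', 1: 'choice', 2: 'sequence'}
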
import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCAmelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("""dataclasses""") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("""importlib_metadata""") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py') def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=None ) -> Optional[int]: '''simple docstring''' require_version(deps[pkg] , UpperCAmelCase_ )
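
# A minimal sketch of the underlying call the helper above forwards to:
# `require_version` raises if the installed package does not satisfy the
# pinned range, surfacing the hint in the error. The pin and hint shown are
# illustrative.
from transformers.utils.versions import require_version

require_version('tqdm>=4.27' , 'Try: pip install tqdm --upgrade')
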
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase ( _lowercase ): """simple docstring""" a__ = "facebook/bart-large-mnli" a__ = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a__ = "text_classifier" a__ = AutoTokenizer a__ = AutoModelForSequenceClassification a__ = ["text", ["text"]] a__ = ["text"] def A__ ( self): super().setup() _UpperCamelCase : List[Any] = self.model.config _UpperCamelCase : Optional[int] = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('entail'): _UpperCamelCase : Tuple = int(__snake_case) if self.entailment_id == -1: raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.') def A__ ( self , __snake_case , __snake_case): _UpperCamelCase : List[Any] = labels return self.pre_processor( [text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , ) def A__ ( self , __snake_case): _UpperCamelCase : str = outputs.logits _UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
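
# A hedged usage sketch for the tool above, assuming the agents API exposes
# it through `load_tool` under the task name "text-classification"; the
# loader name, task string and labels are all illustrative.
from transformers import load_tool

classifier = load_tool('text-classification')
most_likely = classifier('This movie was fantastic!' , labels=['positive', 'negative'])
print(most_likely)  # expected: "positive"
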
from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowerCAmelCase__ = logging.get_logger(__name__) @add_end_docstrings(_lowercase ) class lowercase ( _lowercase ): """simple docstring""" def __init__( self , **__snake_case): super().__init__(**__snake_case) requires_backends(self , 'vision') requires_backends(self , 'torch') if self.framework != "pt": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''') self.check_model_type(__snake_case) def A__ ( self , **__snake_case): _UpperCamelCase : int = {} _UpperCamelCase : Any = {} _UpperCamelCase : Optional[Any] = {} # preprocess args if "points_per_batch" in kwargs: _UpperCamelCase : Optional[Any] = kwargs['points_per_batch'] if "points_per_crop" in kwargs: _UpperCamelCase : Optional[Any] = kwargs['points_per_crop'] if "crops_n_layers" in kwargs: _UpperCamelCase : Any = kwargs['crops_n_layers'] if "crop_overlap_ratio" in kwargs: _UpperCamelCase : List[str] = kwargs['crop_overlap_ratio'] if "crop_n_points_downscale_factor" in kwargs: _UpperCamelCase : Optional[Any] = kwargs['crop_n_points_downscale_factor'] # postprocess args if "pred_iou_thresh" in kwargs: _UpperCamelCase : Dict = kwargs['pred_iou_thresh'] if "stability_score_offset" in kwargs: _UpperCamelCase : Dict = kwargs['stability_score_offset'] if "mask_threshold" in kwargs: _UpperCamelCase : Optional[int] = kwargs['mask_threshold'] if "stability_score_thresh" in kwargs: _UpperCamelCase : Optional[int] = kwargs['stability_score_thresh'] if "crops_nms_thresh" in kwargs: _UpperCamelCase : List[Any] = kwargs['crops_nms_thresh'] if "output_rle_mask" in kwargs: _UpperCamelCase : List[Any] = kwargs['output_rle_mask'] if "output_bboxes_mask" in kwargs: _UpperCamelCase : Union[str, Any] = kwargs['output_bboxes_mask'] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self , __snake_case , *__snake_case , __snake_case=None , __snake_case=None , **__snake_case): return super().__call__(__snake_case , *__snake_case , num_workers=__snake_case , batch_size=__snake_case , **__snake_case) def A__ ( self , __snake_case , __snake_case=64 , __snake_case = 0 , __snake_case = 5_12 / 15_00 , __snake_case = 32 , __snake_case = 1 , ): _UpperCamelCase : List[Any] = load_image(__snake_case) _UpperCamelCase : Union[str, Any] = self.image_processor.size['longest_edge'] _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = self.image_processor.generate_crop_boxes( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case) _UpperCamelCase : str = self.image_processor(images=__snake_case , return_tensors='pt') with self.device_placement(): if self.framework == "pt": _UpperCamelCase : Optional[int] = self.get_inference_context() with inference_context(): _UpperCamelCase : Dict = self._ensure_tensor_on_device(__snake_case , device=self.device) _UpperCamelCase : Tuple = self.model.get_image_embeddings(model_inputs.pop('pixel_values')) _UpperCamelCase : Union[str, Any] = image_embeddings _UpperCamelCase : str = grid_points.shape[1] _UpperCamelCase : int = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( 'Cannot have points_per_batch<=0. 
Must be >=1 to returned batched outputs. ' 'To return all points at once, set points_per_batch to None') for i in range(0 , __snake_case , __snake_case): _UpperCamelCase : str = grid_points[:, i : i + points_per_batch, :, :] _UpperCamelCase : Tuple = input_labels[:, i : i + points_per_batch] _UpperCamelCase : str = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def A__ ( self , __snake_case , __snake_case=0.8_8 , __snake_case=0.9_5 , __snake_case=0 , __snake_case=1 , ): _UpperCamelCase : int = model_inputs.pop('input_boxes') _UpperCamelCase : List[str] = model_inputs.pop('is_last') _UpperCamelCase : Optional[int] = model_inputs.pop('original_sizes').tolist() _UpperCamelCase : Optional[Any] = model_inputs.pop('reshaped_input_sizes').tolist() _UpperCamelCase : Optional[int] = self.model(**__snake_case) # post processing happens here in order to avoid CPU GPU copies of ALL the masks _UpperCamelCase : Dict = model_outputs['pred_masks'] _UpperCamelCase : Union[str, Any] = self.image_processor.post_process_masks( __snake_case , __snake_case , __snake_case , __snake_case , binarize=__snake_case) _UpperCamelCase : List[Any] = model_outputs['iou_scores'] _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : List[str] = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __snake_case , __snake_case , __snake_case , __snake_case , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def A__ ( self , __snake_case , __snake_case=False , __snake_case=False , __snake_case=0.7 , ): _UpperCamelCase : Dict = [] _UpperCamelCase : str = [] _UpperCamelCase : str = [] for model_output in model_outputs: all_scores.append(model_output.pop('iou_scores')) all_masks.extend(model_output.pop('masks')) all_boxes.append(model_output.pop('boxes')) _UpperCamelCase : Tuple = torch.cat(__snake_case) _UpperCamelCase : Union[str, Any] = torch.cat(__snake_case) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = self.image_processor.post_process_for_mask_generation( __snake_case , __snake_case , __snake_case , __snake_case) _UpperCamelCase : Any = defaultdict(__snake_case) for output in model_outputs: for k, v in output.items(): extra[k].append(__snake_case) _UpperCamelCase : str = {} if output_rle_mask: _UpperCamelCase : List[str] = rle_mask if output_bboxes_mask: _UpperCamelCase : Any = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
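
# A hedged usage sketch for the chunked pipeline above. The task string
# "mask-generation" and the SAM checkpoint are assumptions; any image path
# or URL accepted by `load_image` would work.
from transformers import pipeline

mask_generator = pipeline('mask-generation' , model='facebook/sam-vit-base' , points_per_batch=64)
outputs = mask_generator('http://images.cocodataset.org/val2017/000000039769.jpg')
print(len(outputs['masks']) , outputs['scores'].shape)  # one score per kept mask
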
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
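
# A hedged illustration of the `_LazyModule` indirection above: importing
# the package itself is cheap, and the heavy torch/TF submodules are only
# imported when one of the exported names is first accessed.
import transformers.models.blip as blip

processor_cls = blip.BlipProcessor  # first attribute access triggers the real import
print(processor_cls.__name__)
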
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""", # See all GLPN models at https://huggingface.co/models?filter=glpn } class lowercase ( _lowercase ): """simple docstring""" a__ = "glpn" def __init__( self , __snake_case=3 , __snake_case=4 , __snake_case=[2, 2, 2, 2] , __snake_case=[8, 4, 2, 1] , __snake_case=[32, 64, 1_60, 2_56] , __snake_case=[7, 3, 3, 3] , __snake_case=[4, 2, 2, 2] , __snake_case=[1, 2, 5, 8] , __snake_case=[4, 4, 4, 4] , __snake_case="gelu" , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.0_2 , __snake_case=0.1 , __snake_case=1e-6 , __snake_case=64 , __snake_case=10 , __snake_case=-1 , **__snake_case , ): super().__init__(**__snake_case) _UpperCamelCase : Any = num_channels _UpperCamelCase : Any = num_encoder_blocks _UpperCamelCase : List[str] = depths _UpperCamelCase : List[str] = sr_ratios _UpperCamelCase : str = hidden_sizes _UpperCamelCase : str = patch_sizes _UpperCamelCase : Tuple = strides _UpperCamelCase : Union[str, Any] = mlp_ratios _UpperCamelCase : str = num_attention_heads _UpperCamelCase : List[str] = hidden_act _UpperCamelCase : Dict = hidden_dropout_prob _UpperCamelCase : Optional[int] = attention_probs_dropout_prob _UpperCamelCase : int = initializer_range _UpperCamelCase : List[Any] = drop_path_rate _UpperCamelCase : Dict = layer_norm_eps _UpperCamelCase : Dict = decoder_hidden_size _UpperCamelCase : Union[str, Any] = max_depth _UpperCamelCase : List[Any] = head_in_index
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ): _UpperCamelCase : List[Any] = parent _UpperCamelCase : Dict = batch_size _UpperCamelCase : Optional[int] = image_size _UpperCamelCase : str = num_channels _UpperCamelCase : Optional[Any] = embeddings_size _UpperCamelCase : Tuple = hidden_sizes _UpperCamelCase : Dict = depths _UpperCamelCase : str = is_training _UpperCamelCase : Optional[int] = use_labels _UpperCamelCase : str = hidden_act _UpperCamelCase : Optional[int] = num_labels _UpperCamelCase : Optional[int] = scope _UpperCamelCase : Tuple = len(__snake_case) _UpperCamelCase : Dict = out_features _UpperCamelCase : Union[str, Any] = out_indices _UpperCamelCase : int = num_groups def A__ ( self): _UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCamelCase : str = None if self.use_labels: _UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels) _UpperCamelCase : str = self.get_config() return config, pixel_values, labels def A__ ( self): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : str = BitModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[Any] = model(__snake_case) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Dict = self.num_labels _UpperCamelCase : Dict = BitForImageClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Dict = model(__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , 
[self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None _UpperCamelCase : Any = None _UpperCamelCase : str = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Any = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def A__ ( self): _UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs _UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def A__ ( self): _UpperCamelCase : Dict = BitModelTester(self) _UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case) def A__ ( self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self): return @unittest.skip(reason='Bit does not output attentions') def A__ ( self): pass @unittest.skip(reason='Bit does not use inputs_embeds') def A__ ( self): pass @unittest.skip(reason='Bit does not support input and output embeddings') def A__ ( self): pass def A__ ( self): _UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : int = model_class(__snake_case) _UpperCamelCase : List[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : Optional[int] = [*signature.parameters.keys()] _UpperCamelCase : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __snake_case) def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case) def A__ ( self): _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__snake_case) def A__ ( self): _UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : Union[str, Any] = model_class(config=__snake_case) for name, module in model.named_modules(): if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) 
self.assertTrue( torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def A__ ( self): def check_hidden_states_output(__snake_case , __snake_case , __snake_case): _UpperCamelCase : str = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): _UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case)) _UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase : str = self.model_tester.num_stages self.assertEqual(len(__snake_case) , expected_num_stages + 1) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : List[str] = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _UpperCamelCase : Any = layer_type _UpperCamelCase : Tuple = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase : List[str] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) @unittest.skip(reason='Bit does not use feedforward chunking') def A__ ( self): pass def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case) @slow def A__ ( self): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case) self.assertIsNotNone(__snake_case) def lowerCamelCase_ ( ) -> Optional[int]: '''simple docstring''' _UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def A__ ( self): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def A__ ( self): _UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case) _UpperCamelCase : str = self.default_image_processor _UpperCamelCase : List[str] = prepare_img() _UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case) # forward pass with torch.no_grad(): _UpperCamelCase : Any = model(**__snake_case) # verify the logits _UpperCamelCase : Dict = torch.Size((1, 10_00)) self.assertEqual(outputs.logits.shape , __snake_case) _UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4)) @require_torch class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def A__ ( self): _UpperCamelCase : List[str] = BitModelTester(self)
import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase__ = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp lowerCAmelCase__ = 5 lowerCAmelCase__ = 1_0 @require_sentencepiece @require_tokenizers class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = SpeechaTextTokenizer a__ = False a__ = True def A__ ( self): super().setUp() _UpperCamelCase : Any = sp.SentencePieceProcessor() spm_model.Load(__snake_case) _UpperCamelCase : List[str] = ['<s>', '<pad>', '</s>', '<unk>'] vocab += [spm_model.IdToPiece(id_) for id_ in range(len(__snake_case))] _UpperCamelCase : Dict = dict(zip(__snake_case , range(len(__snake_case)))) _UpperCamelCase : Tuple = Path(self.tmpdirname) save_json(__snake_case , save_dir / VOCAB_FILES_NAMES['vocab_file']) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__snake_case , save_dir / VOCAB_FILES_NAMES['spm_file']) _UpperCamelCase : int = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname) def A__ ( self): _UpperCamelCase : str = '<pad>' _UpperCamelCase : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case) def A__ ( self): _UpperCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(vocab_keys[-1] , 'j') self.assertEqual(len(__snake_case) , 10_01) def A__ ( self): self.assertEqual(self.get_tokenizer().vocab_size , 10_01) def A__ ( self): _UpperCamelCase : Any = SpeechaTextTokenizer.from_pretrained(self.tmpdirname) _UpperCamelCase : List[str] = tokenizer.tokenize('This is a test') self.assertListEqual(__snake_case , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(__snake_case) , [2_89, 50, 14, 1_74, 3_86] , ) _UpperCamelCase : int = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( __snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , ) _UpperCamelCase : int = tokenizer.convert_tokens_to_ids(__snake_case) self.assertListEqual(__snake_case , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8]) _UpperCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__snake_case) self.assertListEqual( __snake_case , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , ) @slow def A__ ( self): # fmt: off _UpperCamelCase : Optional[int] = {'input_ids': [[37_91, 7_97, 
31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , ) @require_sentencepiece class lowercase ( unittest.TestCase ): """simple docstring""" a__ = "valhalla/s2t_mustc_multilinguial_medium" a__ = "C'est trop cool" a__ = "Esto es genial" @classmethod def A__ ( cls): _UpperCamelCase : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name) return cls def A__ ( self): self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4) self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6) self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9) self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11) def A__ ( self): self.assertEqual(self.tokenizer.vocab_size , 1_00_00) def A__ ( self): self.assertIn(__snake_case , self.tokenizer.all_special_ids) _UpperCamelCase : Optional[int] = [ES_CODE, 4, 16_01, 47, 76_47, 2] _UpperCamelCase : Tuple = self.tokenizer.decode(__snake_case , skip_special_tokens=__snake_case) _UpperCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__snake_case) self.assertEqual(__snake_case , __snake_case) self.assertNotIn(self.tokenizer.eos_token , __snake_case) def A__ ( self): _UpperCamelCase : Any = 'fr' _UpperCamelCase : List[Any] = self.tokenizer(self.french_text).input_ids self.assertEqual(encoded[0] , __snake_case) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id) def A__ ( self): _UpperCamelCase : Union[str, Any] = 'fr' self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE]) _UpperCamelCase : List[str] = 'es' self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
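The block above closes a Speech2Text tokenizer integration test: the multilingual checkpoint prepends a per-language code token to every target sequence, which is what the prefix_tokens assertions verify. A minimal usage sketch, assuming the upstream class name Speech2TextTokenizer (the checkpoint id is copied verbatim from the test):

from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
tokenizer.tgt_lang = "fr"  # assumed setter; switches prefix_tokens to the French language code
ids = tokenizer("C'est trop cool").input_ids
# ids[0] should be the French language-code id and ids[-1] the eos id, matching the assertions above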
648
1
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCAmelCase__ = logging.getLogger(__name__) lowerCAmelCase__ = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) lowerCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowercase : """simple docstring""" a__ = field( default=_lowercase , metadata={ "help": ( "The model checkpoint for weights initialization. Leave None if you want to train a model from" " scratch." ) } , ) a__ = field( default=_lowercase , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(_lowercase )} , ) a__ = field( default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) a__ = field( default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) a__ = field( default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class lowercase : """simple docstring""" a__ = field( default=_lowercase , metadata={"help": "The input training data file (a text file)."} ) a__ = field( default=_lowercase , metadata={ "help": ( "The input training data files (multiple files in glob format). " "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) } , ) a__ = field( default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) a__ = field( default=_lowercase , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , ) a__ = field( default=_lowercase , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , ) a__ = field( default=_lowercase , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , ) a__ = field( default=_lowercase , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} ) a__ = field(default=_lowercase , metadata={"help": "Whether ot not to use whole word mask."} ) a__ = field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) a__ = field( default=1 / 6 , metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." ) } , ) a__ = field( default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} ) a__ = field( default=-1 , metadata={ "help": ( "Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) } , ) a__ = field( default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def lowerCamelCase_ ( UpperCAmelCase_ : DataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[str] = None , ) -> Tuple: '''simple docstring''' def _dataset(UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask' ) return LineByLineWithRefDataset( tokenizer=UpperCAmelCase_ , file_path=UpperCAmelCase_ , block_size=args.block_size , ref_path=UpperCAmelCase_ , ) return LineByLineTextDataset(tokenizer=UpperCAmelCase_ , file_path=UpperCAmelCase_ , block_size=args.block_size ) else: return TextDataset( tokenizer=UpperCAmelCase_ , file_path=UpperCAmelCase_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=UpperCAmelCase_ , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(UpperCAmelCase_ ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def lowerCamelCase_ ( ) -> str: '''simple docstring''' _UpperCamelCase : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Dict = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( 'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file ' 'or remove the --do_eval argument.' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' ' --overwrite_output_dir to overcome.' ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCAmelCase_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: _UpperCamelCase : int = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: _UpperCamelCase : Dict = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: _UpperCamelCase : Any = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' 
) if model_args.tokenizer_name: _UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: _UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( 'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another' ' script, save it,and load it from here, using --tokenizer_name' ) if model_args.model_name_or_path: _UpperCamelCase : Union[str, Any] = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , ) else: logger.info('Training new model from scratch' ) _UpperCamelCase : Dict = AutoModelWithLMHead.from_config(UpperCAmelCase_ ) model.resize_token_embeddings(len(UpperCAmelCase_ ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( 'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the' '--mlm flag (masked language modeling).' ) if data_args.block_size <= 0: _UpperCamelCase : Tuple = tokenizer.max_len # Our input block size will be the max possible for the model else: _UpperCamelCase : List[str] = min(data_args.block_size , tokenizer.max_len ) # Get datasets _UpperCamelCase : List[str] = ( get_dataset(UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) _UpperCamelCase : Dict = ( get_dataset(UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , evaluate=UpperCAmelCase_ , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": _UpperCamelCase : Tuple = DataCollatorForPermutationLanguageModeling( tokenizer=UpperCAmelCase_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: _UpperCamelCase : List[Any] = DataCollatorForWholeWordMask( tokenizer=UpperCAmelCase_ , mlm_probability=data_args.mlm_probability ) else: _UpperCamelCase : List[Any] = DataCollatorForLanguageModeling( tokenizer=UpperCAmelCase_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer _UpperCamelCase : int = Trainer( model=UpperCAmelCase_ , args=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , prediction_loss_only=UpperCAmelCase_ , ) # Training if training_args.do_train: _UpperCamelCase : Tuple = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=UpperCAmelCase_ ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation _UpperCamelCase : List[Any] = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) _UpperCamelCase : int = trainer.evaluate() _UpperCamelCase : Any = math.exp(eval_output['eval_loss'] ) _UpperCamelCase : Dict = {'perplexity': perplexity} _UpperCamelCase : List[Any] = os.path.join(training_args.output_dir , 'eval_results_lm.txt' ) if trainer.is_world_master(): with open(UpperCAmelCase_ , 'w' ) as writer: logger.info('***** Eval results *****' 
) for key in sorted(result.keys() ): logger.info(' %s = %s' , key , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) results.update(result ) return results def _mp_fn(UpperCAmelCase_ : int) -> None: '''For xla_spawn (TPUs): just call the main routine defined above.''' lowerCamelCase_() if __name__ == "__main__": lowerCamelCase_()
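The evaluation branch above reports perplexity as the exponential of the evaluation loss. A tiny self-contained illustration (the loss value is made up):

import math

eval_loss = 3.21  # illustrative stand-in for eval_output["eval_loss"]
perplexity = math.exp(eval_loss)
print(f"perplexity = {perplexity:.2f}")  # -> perplexity = 24.78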
648
import logging from transformers.configuration_utils import PretrainedConfig lowerCAmelCase__ = logging.getLogger(__name__) class lowercase ( _lowercase ): """simple docstring""" a__ = "masked_bert" def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ): super().__init__(pad_token_id=__snake_case , **__snake_case) _UpperCamelCase : List[Any] = vocab_size _UpperCamelCase : Union[str, Any] = hidden_size _UpperCamelCase : Optional[int] = num_hidden_layers _UpperCamelCase : Any = num_attention_heads _UpperCamelCase : int = hidden_act _UpperCamelCase : str = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : Any = attention_probs_dropout_prob _UpperCamelCase : Tuple = max_position_embeddings _UpperCamelCase : Dict = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : List[Any] = layer_norm_eps _UpperCamelCase : Tuple = pruning_method _UpperCamelCase : Tuple = mask_init _UpperCamelCase : Dict = mask_scale
648
1
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with its Maclaurin series:
    sin(x) = x - x^3/3! + x^5/5! - ...  (x in radians)."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
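A quick sanity check of the function against math.sin:

import math

assert abs(sin(90.0) - 1.0) < 1e-9
assert abs(sin(30.0) - 0.5) < 1e-9
assert abs(sin(45.0) - math.sin(math.radians(45.0))) < 1e-9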
648
import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow lowerCAmelCase__ = False class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self , __snake_case=32): set_seed(0) _UpperCamelCase : int = UNetaDModel(sample_size=__snake_case , in_channels=3 , out_channels=3) _UpperCamelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1) return model, optimizer @slow def A__ ( self): _UpperCamelCase : Tuple = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable _UpperCamelCase : List[Any] = DDPMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) _UpperCamelCase : List[Any] = DDIMScheduler( num_train_timesteps=10_00 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule='linear' , clip_sample=__snake_case , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0) _UpperCamelCase : Optional[Any] = [torch.randn((4, 3, 32, 32)).clip(-1 , 1).to(__snake_case) for _ in range(4)] _UpperCamelCase : str = [torch.randn((4, 3, 32, 32)).to(__snake_case) for _ in range(4)] _UpperCamelCase : int = [torch.randint(0 , 10_00 , (4,)).long().to(__snake_case) for _ in range(4)] # train with a DDPM scheduler _UpperCamelCase , _UpperCamelCase : List[Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : int = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Any = model(__snake_case , timesteps[i]).sample _UpperCamelCase : str = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM _UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.get_model_optimizer(resolution=32) model.train().to(__snake_case) for i in range(4): optimizer.zero_grad() _UpperCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i]) _UpperCamelCase : Dict = model(__snake_case , timesteps[i]).sample _UpperCamelCase : Tuple = torch.nn.functional.mse_loss(__snake_case , noise[i]) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5)) self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5))
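The test above can only pass because DDPMScheduler and DDIMScheduler implement the identical closed-form forward process in add_noise. A minimal sketch of that rule, with an illustrative (not the schedulers' actual) cumulative-alpha schedule:

import torch

alphas_cumprod = torch.linspace(0.9999, 0.0001, 1000)  # illustrative schedule

def add_noise(clean: torch.Tensor, noise: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    # x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    a = alphas_cumprod[t].reshape(-1, 1, 1, 1)
    return a.sqrt() * clean + (1 - a).sqrt() * noise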
648
1
import logging from transformers.configuration_utils import PretrainedConfig lowerCAmelCase__ = logging.getLogger(__name__) class lowercase ( _lowercase ): """simple docstring""" a__ = "masked_bert" def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="topK" , __snake_case="constant" , __snake_case=0.0 , **__snake_case , ): super().__init__(pad_token_id=__snake_case , **__snake_case) _UpperCamelCase : List[Any] = vocab_size _UpperCamelCase : Union[str, Any] = hidden_size _UpperCamelCase : Optional[int] = num_hidden_layers _UpperCamelCase : Any = num_attention_heads _UpperCamelCase : int = hidden_act _UpperCamelCase : str = intermediate_size _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : Any = attention_probs_dropout_prob _UpperCamelCase : Tuple = max_position_embeddings _UpperCamelCase : Dict = type_vocab_size _UpperCamelCase : str = initializer_range _UpperCamelCase : List[Any] = layer_norm_eps _UpperCamelCase : Tuple = pruning_method _UpperCamelCase : Tuple = mask_init _UpperCamelCase : Dict = mask_scale
648
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) lowerCAmelCase__ = { """sample_size""": 3_2, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1_0_0_0, """block_out_channels""": [3_2, 6_4], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """sample_size""": 6_4, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1_0_0_0, """block_out_channels""": [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], """attention_head_dim""": 6_4, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """sample_size""": 2_5_6, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], """attention_head_dim""": 6_4, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } lowerCAmelCase__ = { """num_train_timesteps""": 4_0, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } lowerCAmelCase__ = { """num_train_timesteps""": 2_0_1, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } lowerCAmelCase__ = { """num_train_timesteps""": 1_5_1, """sigma_min""": 0.0_02, """sigma_max""": 80.0, } def lowerCamelCase_ ( UpperCAmelCase_ : int ) -> List[str]: '''simple docstring''' if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any]=False ) -> str: '''simple docstring''' _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.in_layers.0.weight'''] _UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.0.bias'''] _UpperCamelCase : str = checkpoint[F'''{old_prefix}.in_layers.2.weight'''] _UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.in_layers.2.bias'''] _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.emb_layers.1.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.emb_layers.1.bias'''] _UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.out_layers.0.weight'''] _UpperCamelCase : List[Any] = checkpoint[F'''{old_prefix}.out_layers.0.bias'''] _UpperCamelCase : Optional[Any] = 
checkpoint[F'''{old_prefix}.out_layers.3.weight'''] _UpperCamelCase : Union[str, Any] = checkpoint[F'''{old_prefix}.out_layers.3.bias'''] if has_skip: _UpperCamelCase : Tuple = checkpoint[F'''{old_prefix}.skip_connection.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any=None ) -> int: '''simple docstring''' _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = checkpoint[F'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 ) _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.norm.weight'''] _UpperCamelCase : Optional[int] = checkpoint[F'''{old_prefix}.norm.bias'''] _UpperCamelCase : List[str] = weight_q.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Dict = bias_q.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Any = weight_k.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : List[Any] = bias_k.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Dict = weight_v.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Tuple = bias_v.squeeze(-1 ).squeeze(-1 ) _UpperCamelCase : Optional[Any] = ( checkpoint[F'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) _UpperCamelCase : Dict = checkpoint[F'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Tuple: '''simple docstring''' _UpperCamelCase : Any = torch.load(UpperCAmelCase_ , map_location='cpu' ) _UpperCamelCase : Union[str, Any] = {} _UpperCamelCase : Optional[int] = checkpoint['time_embed.0.weight'] _UpperCamelCase : List[Any] = checkpoint['time_embed.0.bias'] _UpperCamelCase : Dict = checkpoint['time_embed.2.weight'] _UpperCamelCase : Optional[Any] = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: _UpperCamelCase : List[str] = checkpoint['label_emb.weight'] _UpperCamelCase : Optional[int] = checkpoint['input_blocks.0.0.weight'] _UpperCamelCase : Union[str, Any] = checkpoint['input_blocks.0.0.bias'] _UpperCamelCase : Optional[int] = unet_config['down_block_types'] _UpperCamelCase : Optional[Any] = unet_config['layers_per_block'] _UpperCamelCase : Dict = unet_config['attention_head_dim'] _UpperCamelCase : List[str] = unet_config['block_out_channels'] _UpperCamelCase : str = 1 _UpperCamelCase : Optional[int] = channels_list[0] for i, layer_type in enumerate(UpperCAmelCase_ ): _UpperCamelCase : List[str] = channels_list[i] _UpperCamelCase : str = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(UpperCAmelCase_ ): _UpperCamelCase : str = F'''down_blocks.{i}.resnets.{j}''' _UpperCamelCase : List[Any] = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : Any = True if j == 0 and downsample_block_has_skip else False _UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(UpperCAmelCase_ ): _UpperCamelCase : List[str] = F'''down_blocks.{i}.resnets.{j}''' _UpperCamelCase : str = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : int = True if j == 0 and downsample_block_has_skip else False _UpperCamelCase : Any = convert_resnet(UpperCAmelCase_ 
, UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) _UpperCamelCase : Dict = F'''down_blocks.{i}.attentions.{j}''' _UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.1''' _UpperCamelCase : Dict = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : int = F'''down_blocks.{i}.downsamplers.0''' _UpperCamelCase : Optional[int] = F'''input_blocks.{current_layer}.0''' _UpperCamelCase : List[Any] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 _UpperCamelCase : Tuple = current_channels # hardcoded the mid-block for now _UpperCamelCase : Any = 'mid_block.resnets.0' _UpperCamelCase : Optional[Any] = 'middle_block.0' _UpperCamelCase : Tuple = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Optional[Any] = 'mid_block.attentions.0' _UpperCamelCase : Tuple = 'middle_block.1' _UpperCamelCase : Union[str, Any] = convert_attention(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : Tuple = 'mid_block.resnets.1' _UpperCamelCase : str = 'middle_block.2' _UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : List[Any] = 0 _UpperCamelCase : Optional[int] = unet_config['up_block_types'] for i, layer_type in enumerate(UpperCAmelCase_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _UpperCamelCase : Optional[Any] = F'''up_blocks.{i}.resnets.{j}''' _UpperCamelCase : Optional[int] = F'''output_blocks.{current_layer}.0''' _UpperCamelCase : str = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0''' _UpperCamelCase : Dict = F'''output_blocks.{current_layer-1}.1''' _UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _UpperCamelCase : str = F'''up_blocks.{i}.resnets.{j}''' _UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer}.0''' _UpperCamelCase : Optional[int] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , has_skip=UpperCAmelCase_ ) _UpperCamelCase : int = F'''up_blocks.{i}.attentions.{j}''' _UpperCamelCase : List[Any] = F'''output_blocks.{current_layer}.1''' _UpperCamelCase : Optional[int] = convert_attention( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) current_layer += 1 if i != len(UpperCAmelCase_ ) - 1: _UpperCamelCase : List[Any] = F'''up_blocks.{i}.upsamplers.0''' _UpperCamelCase : Union[str, Any] = F'''output_blocks.{current_layer-1}.2''' _UpperCamelCase : List[str] = convert_resnet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) _UpperCamelCase : List[Any] = checkpoint['out.0.weight'] _UpperCamelCase : str = checkpoint['out.0.bias'] _UpperCamelCase : int = checkpoint['out.2.weight'] _UpperCamelCase : List[Any] = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, 
help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") lowerCAmelCase__ = parser.parse_args() lowerCAmelCase__ = strabool(args.class_cond) lowerCAmelCase__ = os.path.basename(args.unet_path) print(f'Checkpoint: {ckpt_name}') # Get U-Net config if "imagenet64" in ckpt_name: lowerCAmelCase__ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: lowerCAmelCase__ = TEST_UNET_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') if not args.class_cond: lowerCAmelCase__ = None lowerCAmelCase__ = con_pt_to_diffuser(args.unet_path, unet_config) lowerCAmelCase__ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: lowerCAmelCase__ = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: lowerCAmelCase__ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): lowerCAmelCase__ = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.') lowerCAmelCase__ = CMStochasticIterativeScheduler(**scheduler_config) lowerCAmelCase__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
648
1
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401


deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from"
    " diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import"
    " `from diffusers import StableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
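A stripped-down sketch of the same re-export-with-warning pattern, built only on PEP 562 module __getattr__ and the standard library (illustrative, not diffusers' actual deprecate helper):

import warnings

from diffusers import StableDiffusionControlNetPipeline as _Pipeline  # the supported import path


def __getattr__(name):
    if name == "StableDiffusionControlNetPipeline":
        warnings.warn(
            "Import StableDiffusionControlNetPipeline from `diffusers` directly instead.",
            FutureWarning,
            stacklevel=2,
        )
        return _Pipeline
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")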
648
def heaps(arr: list) -> list:
    """Return all permutations of arr using the iterative form of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0  # restart the scan after every swap
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
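For example, [1, 2, 3] yields its six permutations in Heap's characteristic order:

assert heaps([1, 2, 3]) == [
    (1, 2, 3), (2, 1, 3), (3, 1, 2),
    (1, 3, 2), (2, 3, 1), (3, 2, 1),
]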
648
1
import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = BarthezTokenizer a__ = BarthezTokenizerFast a__ = True a__ = True def A__ ( self): super().setUp() _UpperCamelCase : Tuple = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez') tokenizer.save_pretrained(self.tmpdirname) tokenizer.save_pretrained(self.tmpdirname , legacy_format=__snake_case) _UpperCamelCase : Optional[Any] = tokenizer def A__ ( self): _UpperCamelCase : List[Any] = '<pad>' _UpperCamelCase : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__snake_case) , __snake_case) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__snake_case) , __snake_case) def A__ ( self): _UpperCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(vocab_keys[-1] , '<mask>') self.assertEqual(len(__snake_case) , 10_11_22) def A__ ( self): self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22) @require_torch def A__ ( self): _UpperCamelCase : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] _UpperCamelCase : Tuple = [0, 57, 30_18, 7_03_07, 91, 2] _UpperCamelCase : str = self.tokenizer( __snake_case , max_length=len(__snake_case) , padding=__snake_case , truncation=__snake_case , return_tensors='pt') self.assertIsInstance(__snake_case , __snake_case) self.assertEqual((2, 6) , batch.input_ids.shape) self.assertEqual((2, 6) , batch.attention_mask.shape) _UpperCamelCase : Optional[int] = batch.input_ids.tolist()[0] self.assertListEqual(__snake_case , __snake_case) def A__ ( self): if not self.test_rust_tokenizer: return _UpperCamelCase : Union[str, Any] = self.get_tokenizer() _UpperCamelCase : Dict = self.get_rust_tokenizer() _UpperCamelCase : Optional[int] = 'I was born in 92000, and this is falsé.' 
_UpperCamelCase : int = tokenizer.tokenize(__snake_case) _UpperCamelCase : str = rust_tokenizer.tokenize(__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : Optional[int] = tokenizer.encode(__snake_case , add_special_tokens=__snake_case) _UpperCamelCase : str = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case) self.assertListEqual(__snake_case , __snake_case) _UpperCamelCase : List[str] = self.get_rust_tokenizer() _UpperCamelCase : Any = tokenizer.encode(__snake_case) _UpperCamelCase : Optional[int] = rust_tokenizer.encode(__snake_case) self.assertListEqual(__snake_case , __snake_case) @slow def A__ ( self): # fmt: off _UpperCamelCase : Union[str, Any] = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. _UpperCamelCase : Optional[int] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=__snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=__snake_case , )
648
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = [ ["""attention""", """attn"""], ["""encoder_attention""", """encoder_attn"""], ["""q_lin""", """q_proj"""], ["""k_lin""", """k_proj"""], ["""v_lin""", """v_proj"""], ["""out_lin""", """out_proj"""], ["""norm_embeddings""", """layernorm_embedding"""], ["""position_embeddings""", """embed_positions"""], ["""embeddings""", """embed_tokens"""], ["""ffn.lin""", """fc"""], ] def lowerCamelCase_ ( UpperCAmelCase_ : List[Any] ) -> Optional[int]: '''simple docstring''' if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: _UpperCamelCase : List[Any] = k.replace(UpperCAmelCase_ , UpperCAmelCase_ ) if k.startswith('encoder' ): _UpperCamelCase : Optional[Any] = k.replace('.attn' , '.self_attn' ) _UpperCamelCase : Optional[int] = k.replace('norm1' , 'self_attn_layer_norm' ) _UpperCamelCase : Tuple = k.replace('norm2' , 'final_layer_norm' ) elif k.startswith('decoder' ): _UpperCamelCase : Any = k.replace('norm1' , 'self_attn_layer_norm' ) _UpperCamelCase : Tuple = k.replace('norm2' , 'encoder_attn_layer_norm' ) _UpperCamelCase : Tuple = k.replace('norm3' , 'final_layer_norm' ) return k def lowerCamelCase_ ( UpperCAmelCase_ : Dict ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase : Union[str, Any] = [ 'model.encoder.layernorm_embedding.weight', 'model.encoder.layernorm_embedding.bias', 'model.decoder.layernorm_embedding.weight', 'model.decoder.layernorm_embedding.bias', ] for k in keys: _UpperCamelCase : Optional[int] = sd.pop(UpperCAmelCase_ ) _UpperCamelCase : str = k.replace('layernorm_embedding' , 'layer_norm' ) assert new_k not in sd _UpperCamelCase : Tuple = v lowerCAmelCase__ = ["""START"""] @torch.no_grad() def lowerCamelCase_ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any ) -> List[str]: '''simple docstring''' _UpperCamelCase : Optional[Any] = torch.load(UpperCAmelCase_ , map_location='cpu' ) _UpperCamelCase : int = model['model'] _UpperCamelCase : List[Any] = BlenderbotConfig.from_json_file(UpperCAmelCase_ ) _UpperCamelCase : Any = BlenderbotForConditionalGeneration(UpperCAmelCase_ ) _UpperCamelCase : int = m.model.state_dict().keys() _UpperCamelCase : Union[str, Any] = [] _UpperCamelCase : int = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue _UpperCamelCase : Optional[int] = rename_state_dict_key(UpperCAmelCase_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: _UpperCamelCase : int = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(UpperCAmelCase_ ) m.model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ ) m.half() m.save_pretrained(UpperCAmelCase_ ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""") parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""") parser.add_argument( """--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use""" ) lowerCAmelCase__ = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
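A worked example of the key translation above, traced by hand through PATTERNS (so treat the exact output as illustrative; the helper is referenced at its call sites as rename_state_dict_key):

new_k = rename_state_dict_key("encoder.attention.q_lin.weight")
# PATTERNS first give "encoder.attn.q_proj.weight";
# the encoder-specific branch then yields "encoder.self_attn.q_proj.weight"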
648
1
# Usage: # ./gen-card-allenai-wmt16.py import os from pathlib import Path def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] ) -> Optional[int]: '''simple docstring''' _UpperCamelCase : Optional[int] = { 'en': 'Machine learning is great, isn\'t it?', 'ru': 'Машинное обучение - это здорово, не так ли?', 'de': 'Maschinelles Lernen ist großartig, nicht wahr?', } # BLUE scores as follows: # "pair": [fairseq, transformers] _UpperCamelCase : Optional[int] = { 'wmt16-en-de-dist-12-1': [2_8.3, 2_7.5_2], 'wmt16-en-de-dist-6-1': [2_7.4, 2_7.1_1], 'wmt16-en-de-12-1': [2_6.9, 2_5.7_5], } _UpperCamelCase : str = F'''{src_lang}-{tgt_lang}''' _UpperCamelCase : Tuple = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt16 - allenai license: apache-2.0 datasets: - wmt16 metrics: - bleu --- # FSMT ## Model description This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}. For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369). All 3 models are available: * [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1) * [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1) * [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "allenai/{model_name}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias ## Training data Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369). ## Eval results Here are the BLEU scores: model | fairseq | transformers -------|---------|---------- {model_name} | {scores[model_name][0]} | {scores[model_name][1]} The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs. 
The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt16/) - [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` ''' model_card_dir.mkdir(parents=UpperCAmelCase_ , exist_ok=UpperCAmelCase_ ) _UpperCamelCase : Any = os.path.join(UpperCAmelCase_ , 'README.md' ) print(F'''Generating {path}''' ) with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f: f.write(UpperCAmelCase_ ) # make sure we are under the root of the project lowerCAmelCase__ = Path(__file__).resolve().parent.parent.parent lowerCAmelCase__ = repo_dir / """model_cards""" for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]: lowerCAmelCase__ = model_cards_dir / """allenai""" / model_name write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
648
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer lowerCAmelCase__ = ["""bert-base-uncased""", """bert-base-cased"""] lowerCAmelCase__ = """hf-internal-testing/tiny-bert-tf-only""" if is_tf_available(): class lowercase ( tf.keras.Model ): """simple docstring""" def __init__( self , __snake_case): super().__init__() _UpperCamelCase : List[Any] = tokenizer _UpperCamelCase : List[Any] = AutoConfig.from_pretrained(__snake_case) _UpperCamelCase : Dict = TFAutoModel.from_config(__snake_case) def A__ ( self , __snake_case): _UpperCamelCase : Any = self.tokenizer(__snake_case) _UpperCamelCase : Dict = self.bert(**__snake_case) return out["pooler_output"] @require_tf @require_tensorflow_text class lowercase ( unittest.TestCase ): """simple docstring""" def A__ ( self): super().setUp() _UpperCamelCase : Optional[Any] = [ BertTokenizer.from_pretrained(__snake_case) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false _UpperCamelCase : Optional[Any] = [TFBertTokenizer.from_pretrained(__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers) == len(self.tf_tokenizers) _UpperCamelCase : Optional[Any] = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] _UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1])) def A__ ( self): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers): for test_inputs in (self.test_sentences, self.paired_sentences): _UpperCamelCase : List[str] = tokenizer(__snake_case , return_tensors='tf' , padding='longest') _UpperCamelCase : Tuple = tf_tokenizer(__snake_case) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa) == tf_outputs[key])) @slow def A__ ( self): for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Tuple = tf_tokenizer(self.paired_sentences) _UpperCamelCase : Optional[Any] = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa) == separated_outputs[key])) @slow def A__ ( self): for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Tuple = tf.function(__snake_case) for test_inputs in (self.test_sentences, self.paired_sentences): _UpperCamelCase : Optional[int] = tf.constant(__snake_case) _UpperCamelCase : Union[str, Any] = compiled_tokenizer(__snake_case) _UpperCamelCase : Tuple = tf_tokenizer(__snake_case) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == 
compiled_outputs[key])) @slow def A__ ( self): for tf_tokenizer in self.tf_tokenizers: _UpperCamelCase : Any = ModelToSave(tokenizer=__snake_case) _UpperCamelCase : Any = tf.convert_to_tensor(self.test_sentences) _UpperCamelCase : Union[str, Any] = model(__snake_case) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _UpperCamelCase : int = Path(__snake_case) / 'saved.model' model.save(__snake_case) _UpperCamelCase : Optional[int] = tf.keras.models.load_model(__snake_case) _UpperCamelCase : int = loaded_model(__snake_case) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)) , 1e-5)
648
1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
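The _LazyModule indirection defers the heavy torch import until a symbol is first touched. A rough standard-library equivalent using PEP 562 module __getattr__ (a sketch, not transformers' actual implementation):

import importlib

_import_structure = {"configuration_swinv2": ["Swinv2Config"]}


def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")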
648
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase__ = { """configuration_canine""": ["""CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CanineConfig"""], """tokenization_canine""": ["""CanineTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """CANINE_PRETRAINED_MODEL_ARCHIVE_LIST""", """CanineForMultipleChoice""", """CanineForQuestionAnswering""", """CanineForSequenceClassification""", """CanineForTokenClassification""", """CanineLayer""", """CanineModel""", """CaninePreTrainedModel""", """load_tf_weights_in_canine""", ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
648
1
import cmath
import math


def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Calculate the apparent power of a single-phase AC circuit from the
    voltage/current magnitudes and their phase angles (in degrees)."""
    # Convert the angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rectangular = cmath.rect(voltage, voltage_angle)
    current_rectangular = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rectangular * current_rectangular


if __name__ == "__main__":
    import doctest

    doctest.testmod()
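A quick check with round numbers (zero phase angles keep the result purely real):

assert apparent_power(100, 5, 0, 0) == (500 + 0j)  # |S| = 100 V * 5 A = 500 VA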
648
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=10 , __snake_case=[8, 16, 32, 64] , __snake_case=[1, 1, 2, 1] , __snake_case=True , __snake_case=True , __snake_case="relu" , __snake_case=3 , __snake_case=None , __snake_case=["stage2", "stage3", "stage4"] , __snake_case=[2, 3, 4] , __snake_case=1 , ): _UpperCamelCase : List[Any] = parent _UpperCamelCase : Dict = batch_size _UpperCamelCase : Optional[int] = image_size _UpperCamelCase : str = num_channels _UpperCamelCase : Optional[Any] = embeddings_size _UpperCamelCase : Tuple = hidden_sizes _UpperCamelCase : Dict = depths _UpperCamelCase : str = is_training _UpperCamelCase : Optional[int] = use_labels _UpperCamelCase : str = hidden_act _UpperCamelCase : Optional[int] = num_labels _UpperCamelCase : Optional[int] = scope _UpperCamelCase : Tuple = len(__snake_case) _UpperCamelCase : Dict = out_features _UpperCamelCase : Union[str, Any] = out_indices _UpperCamelCase : int = num_groups def A__ ( self): _UpperCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCamelCase : str = None if self.use_labels: _UpperCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels) _UpperCamelCase : str = self.get_config() return config, pixel_values, labels def A__ ( self): return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : str = BitModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[Any] = model(__snake_case) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Dict = self.num_labels _UpperCamelCase : Dict = BitForImageClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Dict = model(__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , 
[self.batch_size, self.hidden_sizes[1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:]) # verify backbone works with out_features=None _UpperCamelCase : Any = None _UpperCamelCase : str = BitBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Any = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def A__ ( self): _UpperCamelCase : Optional[int] = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : int = config_and_inputs _UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () a__ = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def A__ ( self): _UpperCamelCase : Dict = BitModelTester(self) _UpperCamelCase : Optional[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case) def A__ ( self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self): return @unittest.skip(reason='Bit does not output attentions') def A__ ( self): pass @unittest.skip(reason='Bit does not use inputs_embeds') def A__ ( self): pass @unittest.skip(reason='Bit does not support input and output embeddings') def A__ ( self): pass def A__ ( self): _UpperCamelCase , _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : int = model_class(__snake_case) _UpperCamelCase : List[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : Optional[int] = [*signature.parameters.keys()] _UpperCamelCase : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __snake_case) def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case) def A__ ( self): _UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__snake_case) def A__ ( self): _UpperCamelCase , _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : Union[str, Any] = model_class(config=__snake_case) for name, module in model.named_modules(): if isinstance(__snake_case , (nn.BatchNormad, nn.GroupNorm)): self.assertTrue( torch.all(module.weight == 1) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) 
self.assertTrue( torch.all(module.bias == 0) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) def A__ ( self): def check_hidden_states_output(__snake_case , __snake_case , __snake_case): _UpperCamelCase : str = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): _UpperCamelCase : Union[str, Any] = model(**self._prepare_for_class(__snake_case , __snake_case)) _UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _UpperCamelCase : str = self.model_tester.num_stages self.assertEqual(len(__snake_case) , expected_num_stages + 1) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) _UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : List[str] = ['preactivation', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: _UpperCamelCase : Any = layer_type _UpperCamelCase : Tuple = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase : List[str] = True check_hidden_states_output(__snake_case , __snake_case , __snake_case) @unittest.skip(reason='Bit does not use feedforward chunking') def A__ ( self): pass def A__ ( self): _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case) @slow def A__ ( self): for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : Optional[Any] = BitModel.from_pretrained(__snake_case) self.assertIsNotNone(__snake_case) def lowerCamelCase_ ( ) -> Optional[int]: '''simple docstring''' _UpperCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def A__ ( self): return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def A__ ( self): _UpperCamelCase : Optional[Any] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__snake_case) _UpperCamelCase : str = self.default_image_processor _UpperCamelCase : List[str] = prepare_img() _UpperCamelCase : int = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case) # forward pass with torch.no_grad(): _UpperCamelCase : Any = model(**__snake_case) # verify the logits _UpperCamelCase : Dict = torch.Size((1, 10_00)) self.assertEqual(outputs.logits.shape , __snake_case) _UpperCamelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]]).to(__snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4)) @require_torch class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = (BitBackbone,) if is_torch_available() else () a__ = BitConfig a__ = False def A__ ( self): _UpperCamelCase : List[str] = BitModelTester(self)
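The initialization test above walks the module tree and asserts that every normalization layer starts from identity parameters (note that `nn.BatchNormad` in the dump is a mangled `nn.BatchNorm2d`). A minimal standalone sketch of the same check, using a toy torch module rather than the actual BitModel:

import torch
from torch import nn

# Toy module standing in for BitModel; freshly constructed norm layers
# should have weight == 1 and bias == 0 before any training.
module_tree = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.GroupNorm(2, 8))

for name, module in module_tree.named_modules():
    if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
        assert torch.all(module.weight == 1), f"{name} weight not initialized to 1"
        assert torch.all(module.bias == 0), f"{name} bias not initialized to 0"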
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR; "[GO]" serves as unk, bos and pad token."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # tokenization is per character
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
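A standalone sketch of the character-level encode/decode logic the tokenizer above implements, with a made-up toy vocabulary (the real MGP-STR vocab has 27 entries and lives in vocab.json):

# Toy vocab for illustration only; "[GO]" doubles as the unk token as above.
vocab = {"[GO]": 0, "[s]": 1, "a": 2, "b": 3, "c": 4}
decoder = {v: k for k, v in vocab.items()}
unk_id = vocab["[GO]"]

def encode(text: str) -> list[int]:
    # _tokenize splits into single characters; unknown chars map to unk
    return [vocab.get(ch, unk_id) for ch in text]

def decode(ids: list[int]) -> str:
    return "".join(decoder[i] for i in ids)

assert encode("abcz") == [2, 3, 4, 0]  # "z" is out of vocab -> unk id
assert decode([2, 3, 4]) == "abc"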
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the Koch iteration `steps` times to the given polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every segment with four segments forming the Koch 'bump'."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
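A quick sanity check for the construction above, assuming it runs in the same module: each iteration replaces every segment with four, so after k steps the polyline has 3 * 4**k + 1 vertices, and the 60-degree rotation preserves vector length:

import numpy

assert numpy.isclose(numpy.linalg.norm(rotate(numpy.array([1.0, 0.0]), 60)), 1.0)
for k in range(4):
    assert len(iterate(INITIAL_VECTORS, k)) == 3 * 4**k + 1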
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
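For reference, the expected tokenization in the test can be reproduced by hand: BPE repeatedly merges the highest-priority adjacent pair. A simplified sketch (applying the toy merges in rank order once each, which is sufficient for this example):

word = ["r", "e", "a", "c", "t</w>"]  # characters; the last one carries the end-of-word marker
merges = [("a", "p"), ("ap", "t</w>"), ("r", "e"), ("a", "d"), ("ad", "apt</w>")]
for left, right in merges:
    i = 0
    while i < len(word) - 1:
        if (word[i], word[i + 1]) == (left, right):
            word[i : i + 2] = [left + right]
        else:
            i += 1
print(word)  # ['re', 'a', 'c', 't</w>'] -> rendered as "re@@ a@@ c@@ t"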
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; report that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
"""
Project Euler Problem 145: https://projecteuler.net/problem=145

A number n is "reversible" if the sum n + reverse(n) consists entirely of odd
digits. Count the reversible numbers below 10**max_power (leading zeroes are
not allowed in either n or reverse(n)).
"""
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of the given length by filling digit pairs
    from the outside in, tracking the running carry in `remainder`."""
    if remaining_length == 0:
        # leading zeroes are not allowed
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # the middle digit of an odd-length number is added to itself
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
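A brute-force cross-check of the digit DP above for small lengths (the problem statement says there are exactly 120 reversible numbers below one thousand):

def is_reversible(n: int) -> bool:
    if n % 10 == 0:  # reverse(n) would have a leading zero
        return False
    return all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1])))

assert solution(3) == sum(is_reversible(n) for n in range(1, 10**3)) == 120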
lowerCAmelCase__ = range(2, 2_0 + 1) lowerCAmelCase__ = [1_0**k for k in range(ks[-1] + 1)] lowerCAmelCase__ = {} def lowerCamelCase_ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int ) -> Tuple: '''simple docstring''' _UpperCamelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ) _UpperCamelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) ) _UpperCamelCase , _UpperCamelCase : Dict = 0, 0 _UpperCamelCase : Optional[int] = n - i _UpperCamelCase : Union[str, Any] = memo.get(UpperCAmelCase_ ) if sub_memo is not None: _UpperCamelCase : str = sub_memo.get(UpperCAmelCase_ ) if jumps is not None and len(UpperCAmelCase_ ) > 0: # find and make the largest jump without going over _UpperCamelCase : str = -1 for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: _UpperCamelCase : Optional[Any] = _k break if max_jump >= 0: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[Any] = jumps[max_jump] # since the difference between jumps is cached, add c _UpperCamelCase : Tuple = diff + c for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ): _UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 ) if new_c > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: _UpperCamelCase : Union[str, Any] = [] else: _UpperCamelCase : List[Any] = {c: []} _UpperCamelCase : Optional[int] = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps _UpperCamelCase , _UpperCamelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead _UpperCamelCase , _UpperCamelCase : Any = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped _UpperCamelCase : List[str] = sub_memo[c] # keep jumps sorted by # of terms skipped _UpperCamelCase : Union[str, Any] = 0 while j < len(UpperCAmelCase_ ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) ) return (diff, dn) def lowerCamelCase_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any ) -> Dict: '''simple docstring''' if i >= n: return 0, i if k > len(UpperCAmelCase_ ): a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) _UpperCamelCase : Any = i _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Any = 0, 0, 0 for j in range(len(UpperCAmelCase_ ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 _UpperCamelCase : Union[str, Any] = ds_c + ds_b diff += addend _UpperCamelCase : Union[str, Any] = 0 for j in range(UpperCAmelCase_ ): _UpperCamelCase : Union[str, Any] = a_i[j] + addend _UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return diff, i - start_i def lowerCamelCase_ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any ) -> Dict: '''simple docstring''' for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ): _UpperCamelCase : List[str] = digits[j] + 
addend if s >= 1_0: _UpperCamelCase , _UpperCamelCase : Any = divmod(UpperCAmelCase_ , 1_0 ) _UpperCamelCase : Union[str, Any] = addend // 1_0 + quotient else: _UpperCamelCase : Dict = s _UpperCamelCase : Optional[Any] = addend // 1_0 if addend == 0: break while addend > 0: _UpperCamelCase , _UpperCamelCase : Dict = divmod(UpperCAmelCase_ , 1_0 ) digits.append(UpperCAmelCase_ ) def lowerCamelCase_ ( UpperCAmelCase_ : int = 1_0**1_5 ) -> int: '''simple docstring''' _UpperCamelCase : Optional[Any] = [1] _UpperCamelCase : Optional[int] = 1 _UpperCamelCase : int = 0 while True: _UpperCamelCase , _UpperCamelCase : List[Any] = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ ) dn += terms_jumped if dn == n - i: break _UpperCamelCase : str = 0 for j in range(len(UpperCAmelCase_ ) ): a_n += digits[j] * 1_0**j return a_n if __name__ == "__main__": print(f'{solution() = }')
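The memoised jump logic above is hard to eyeball; here is a direct reference implementation for small n, assuming the sequence is a(1) = 1, a(n+1) = a(n) + digitsum(a(n)) as `compute` implies (Project Euler 551):

def brute_force(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

# first terms: 1, 2, 4, 8, 16, 23, 28, ...
assert [brute_force(i) for i in range(1, 8)] == [1, 2, 4, 8, 16, 23, 28]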
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=13 , __snake_case=7 , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=True , __snake_case=99 , __snake_case=64 , __snake_case=32 , __snake_case=5 , __snake_case=4 , __snake_case=37 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=16 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=3 , __snake_case=4 , __snake_case=None , ): _UpperCamelCase : List[Any] = parent _UpperCamelCase : Optional[Any] = batch_size _UpperCamelCase : int = seq_length _UpperCamelCase : str = is_training _UpperCamelCase : Tuple = use_input_mask _UpperCamelCase : Union[str, Any] = use_token_type_ids _UpperCamelCase : Union[str, Any] = use_labels _UpperCamelCase : Optional[Any] = vocab_size _UpperCamelCase : List[Any] = hidden_size _UpperCamelCase : Optional[Any] = embedding_size _UpperCamelCase : str = num_hidden_layers _UpperCamelCase : str = num_attention_heads _UpperCamelCase : int = intermediate_size _UpperCamelCase : int = hidden_act _UpperCamelCase : Tuple = hidden_dropout_prob _UpperCamelCase : int = attention_probs_dropout_prob _UpperCamelCase : Tuple = max_position_embeddings _UpperCamelCase : List[str] = type_vocab_size _UpperCamelCase : Dict = type_sequence_label_size _UpperCamelCase : List[str] = initializer_range _UpperCamelCase : Optional[Any] = num_labels _UpperCamelCase : Tuple = num_choices _UpperCamelCase : List[str] = scope def A__ ( self): _UpperCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _UpperCamelCase : Any = None if self.use_input_mask: _UpperCamelCase : int = random_attention_mask([self.batch_size, self.seq_length]) _UpperCamelCase : Optional[Any] = None if self.use_token_type_ids: _UpperCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _UpperCamelCase : int = None _UpperCamelCase : List[str] = None _UpperCamelCase : Dict = None if self.use_labels: _UpperCamelCase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices) _UpperCamelCase : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A__ ( self): return MegatronBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , 
hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , ) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : List[str] = MegatronBertModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case) _UpperCamelCase : Dict = model(__snake_case , token_type_ids=__snake_case) _UpperCamelCase : Optional[Any] = model(__snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : int = MegatronBertForMaskedLM(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Dict = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : str = MegatronBertForCausalLM(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Tuple = MegatronBertForNextSentencePrediction(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = MegatronBertForPreTraining(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[str] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , next_sentence_label=__snake_case , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : int = MegatronBertForQuestionAnswering(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def A__ ( self , 
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[int] = self.num_labels _UpperCamelCase : Union[str, Any] = MegatronBertForSequenceClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : str = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Any = self.num_labels _UpperCamelCase : Optional[int] = MegatronBertForTokenClassification(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Tuple = model(__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : List[str] = self.num_choices _UpperCamelCase : Optional[int] = MegatronBertForMultipleChoice(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : List[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : List[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : Optional[Any] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _UpperCamelCase : Union[str, Any] = model( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def A__ ( self): _UpperCamelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ( _UpperCamelCase ) , ) : Optional[int] = config_and_inputs _UpperCamelCase : int = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if is_torch_available() else () ) a__ = ( { "feature-extraction": MegatronBertModel, "fill-mask": MegatronBertForMaskedLM, "question-answering": MegatronBertForQuestionAnswering, "text-classification": MegatronBertForSequenceClassification, "text-generation": MegatronBertForCausalLM, "token-classification": MegatronBertForTokenClassification, "zero-shot": MegatronBertForSequenceClassification, } if is_torch_available() else {} ) a__ = True # test_resize_embeddings = False a__ = False def A__ ( self , __snake_case , __snake_case , __snake_case=False): _UpperCamelCase : str = super()._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case) if return_labels: if model_class in get_values(__snake_case): _UpperCamelCase : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__snake_case) _UpperCamelCase : str = 
torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__snake_case) return inputs_dict def A__ ( self): _UpperCamelCase : Any = MegatronBertModelTester(self) _UpperCamelCase : int = ConfigTester(self , config_class=__snake_case , hidden_size=37) def A__ ( self): self.config_tester.run_common_tests() def A__ ( self): _UpperCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__snake_case) def A__ ( self): _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__snake_case) def A__ ( self): _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__snake_case) def A__ ( self): _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__snake_case) def A__ ( self): _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__snake_case) def A__ ( self): _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__snake_case) def A__ ( self): _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__snake_case) def A__ ( self): _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__snake_case) def lowerCamelCase_ ( UpperCAmelCase_ : str ) -> Optional[Any]: '''simple docstring''' return torch.tensor( UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , ) lowerCAmelCase__ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class lowercase ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip('Model is not available.') def A__ ( self): _UpperCamelCase : int = 'nvidia/megatron-bert-uncased-345m' if "MYDIR" in os.environ: _UpperCamelCase : int = os.path.join(os.environ['MYDIR'] , __snake_case) _UpperCamelCase : Optional[int] = MegatronBertModel.from_pretrained(__snake_case) model.to(__snake_case) model.half() _UpperCamelCase : Optional[Any] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]]) with torch.no_grad(): _UpperCamelCase : str = model(__snake_case)[0] _UpperCamelCase : Optional[int] = torch.Size((1, 9, 10_24)) self.assertEqual(output.shape , __snake_case) _UpperCamelCase : Union[str, Any] = [-0.6_0_4_0, -0.2_5_1_7, -0.1_0_2_5, 0.3_4_2_0, -0.6_7_5_8, -0.0_0_1_7, -0.1_0_8_9, -0.1_9_9_0, 0.5_7_2_8] for ii in range(3): for jj in range(3): _UpperCamelCase : Optional[Any] = output[0, ii, jj] _UpperCamelCase : Dict = expected[3 * ii + jj] _UpperCamelCase : Optional[int] = 'ii={} jj={} a={} b={}'.format(__snake_case , __snake_case , __snake_case , __snake_case) self.assertTrue(math.isclose(__snake_case , __snake_case , rel_tol=__snake_case , abs_tol=__snake_case) , msg=__snake_case)
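The integration test above compares nine logits one at a time with math.isclose; the same check can be written as a single vectorized comparison. A sketch with placeholder values (the expected slice is taken from the test; the "output" is simulated):

import torch

expected = torch.tensor([-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728])
output = expected + 1e-5  # stand-in for model(input)[0][0, :3, :3].flatten()
assert torch.allclose(output, expected, rtol=1e-4, atol=1e-4)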
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    """Configuration for a ViT MAE model (masked-autoencoder pretraining of a ViT)."""

    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
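With the defaults above, an MAE encoder sees only a quarter of the patches; a quick arithmetic check (no model instantiation needed):

image_size, patch_size, mask_ratio = 224, 16, 0.75
num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196
num_masked = int(mask_ratio * num_patches)     # 147 patches hidden from the encoder
print(num_patches - num_masked)                # 49 visible patches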
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool


class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        # index 2 is the entailment class for MNLI-style heads
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
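The tool wraps the standard NLI trick for zero-shot classification: pair the text with one "This example is {label}" hypothesis per label and pick the label with the highest entailment logit. A hedged usage sketch outside the tool framework (the checkpoint is real; the example text and predicted label are illustrative):

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")

text = "The new GPU doubles training throughput."
labels = ["sports", "technology", "cooking"]
inputs = tokenizer(
    [text] * len(labels),
    [f"This example is {label}" for label in labels],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    logits = model(**inputs).logits  # one (contradiction, neutral, entailment) row per label
print(labels[logits[:, 2].argmax().item()])  # expected: "technology"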
648
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to cover all travel `days` given [1-day, 7-day, 30-day] pass prices."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
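Example usage of the DP above on the classic LeetCode 983 inputs (travel days, then [1-day, 7-day, 30-day] pass prices):

print(mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]))                       # 11
print(mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]))  # 17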
import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case="resnet50" , __snake_case=3 , __snake_case=32 , __snake_case=3 , __snake_case=True , __snake_case=True , ): _UpperCamelCase : Optional[Any] = parent _UpperCamelCase : Dict = out_indices if out_indices is not None else [4] _UpperCamelCase : Optional[Any] = stage_names _UpperCamelCase : int = out_features _UpperCamelCase : Any = backbone _UpperCamelCase : Dict = batch_size _UpperCamelCase : Union[str, Any] = image_size _UpperCamelCase : List[Any] = num_channels _UpperCamelCase : Any = use_pretrained_backbone _UpperCamelCase : Dict = is_training def A__ ( self): _UpperCamelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCamelCase : Optional[Any] = self.get_config() return config, pixel_values def A__ ( self): return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def A__ ( self , __snake_case , __snake_case): _UpperCamelCase : Optional[int] = TimmBackbone(config=__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): _UpperCamelCase : Any = model(__snake_case) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def A__ ( self): _UpperCamelCase : str = self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase : int = config_and_inputs _UpperCamelCase : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = (TimmBackbone,) if is_torch_available() else () a__ = {"feature-extraction": TimmBackbone} if is_torch_available() else {} a__ = False a__ = False a__ = False a__ = False def A__ ( self): _UpperCamelCase : List[str] = TimmBackboneModelTester(self) _UpperCamelCase : List[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case) def A__ ( self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self): _UpperCamelCase : int = 'resnet18' _UpperCamelCase : Tuple = 'microsoft/resnet-18' _UpperCamelCase : int = AutoBackbone.from_pretrained(__snake_case , use_timm_backbone=__snake_case) _UpperCamelCase : List[Any] = AutoBackbone.from_pretrained(__snake_case) self.assertEqual(len(timm_model.out_features) , 
len(transformers_model.out_features)) self.assertEqual(len(timm_model.stage_names) , len(transformers_model.stage_names)) self.assertEqual(timm_model.channels , transformers_model.channels) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,)) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names) - 1]) _UpperCamelCase : Optional[Any] = AutoBackbone.from_pretrained(__snake_case , use_timm_backbone=__snake_case , out_indices=[1, 2, 3]) _UpperCamelCase : Dict = AutoBackbone.from_pretrained(__snake_case , out_indices=[1, 2, 3]) self.assertEqual(timm_model.out_indices , transformers_model.out_indices) self.assertEqual(len(timm_model.out_features) , len(transformers_model.out_features)) self.assertEqual(timm_model.channels , transformers_model.channels) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking') def A__ ( self): pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute') def A__ ( self): pass @unittest.skip('TimmBackbone initialization is managed on the timm side') def A__ ( self): pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds') def A__ ( self): pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds') def A__ ( self): pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint') def A__ ( self): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone') def A__ ( self): pass @unittest.skip('model weights aren\'t tied in TimmBackbone.') def A__ ( self): pass @unittest.skip('model weights aren\'t tied in TimmBackbone.') def A__ ( self): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone') def A__ ( self): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone') def A__ ( self): pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.') def A__ ( self): pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.') def A__ ( self): pass @unittest.skip('Safetensors is not supported by timm.') def A__ ( self): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.') def A__ ( self): pass def A__ ( self): _UpperCamelCase , _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : str = model_class(__snake_case) _UpperCamelCase : Dict = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : int = [*signature.parameters.keys()] _UpperCamelCase : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] , __snake_case) def A__ ( self): _UpperCamelCase , _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : Dict = True _UpperCamelCase : int = self.has_attentions # no need to test all models as different heads yield the same functionality _UpperCamelCase : Any = self.all_model_classes[0] _UpperCamelCase : List[Any] = model_class(__snake_case) model.to(__snake_case) _UpperCamelCase : List[Any] = self._prepare_for_class(__snake_case , __snake_case) _UpperCamelCase : Optional[Any] = model(**__snake_case) _UpperCamelCase : int = outputs[0][-1] # Encoder-/Decoder-only 
models _UpperCamelCase : List[Any] = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: _UpperCamelCase : List[Any] = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__snake_case) self.assertIsNotNone(hidden_states.grad) if self.has_attentions: self.assertIsNotNone(attentions.grad) def A__ ( self): _UpperCamelCase , _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCamelCase : List[Any] = model_class(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : str = model(**__snake_case) self.assertEqual(len(result.feature_maps) , len(config.out_indices)) self.assertEqual(len(model.channels) , len(config.out_indices)) # Check output of last stage is taken if out_features=None, out_indices=None _UpperCamelCase : int = copy.deepcopy(__snake_case) _UpperCamelCase : str = None _UpperCamelCase : Optional[Any] = model_class(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Tuple = model(**__snake_case) self.assertEqual(len(result.feature_maps) , 1) self.assertEqual(len(model.channels) , 1) # Check backbone can be initialized with fresh weights _UpperCamelCase : Union[str, Any] = copy.deepcopy(__snake_case) _UpperCamelCase : Optional[int] = False _UpperCamelCase : Optional[int] = model_class(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Any = model(**__snake_case)
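For context, the timm behaviour this backbone wraps can be exercised directly; a minimal sketch (requires timm) showing that out_indices selects which stages' feature maps are returned:

import timm
import torch

backbone = timm.create_model("resnet18", features_only=True, out_indices=(1, 2, 3), pretrained=False)
features = backbone(torch.randn(1, 3, 64, 64))
print([tuple(f.shape) for f in features])  # one feature map per requested stage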
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case=13 , __snake_case=32 , __snake_case=2 , __snake_case=3 , __snake_case=16 , __snake_case=[32, 64, 1_28] , __snake_case=[1, 2, 1] , __snake_case=[2, 2, 4] , __snake_case=2 , __snake_case=2.0 , __snake_case=True , __snake_case=0.0 , __snake_case=0.0 , __snake_case=0.1 , __snake_case="gelu" , __snake_case=False , __snake_case=True , __snake_case=0.0_2 , __snake_case=1e-5 , __snake_case=True , __snake_case=None , __snake_case=True , __snake_case=10 , __snake_case=8 , __snake_case=["stage1", "stage2"] , __snake_case=[1, 2] , ): _UpperCamelCase : Optional[int] = parent _UpperCamelCase : List[str] = batch_size _UpperCamelCase : Union[str, Any] = image_size _UpperCamelCase : List[Any] = patch_size _UpperCamelCase : Dict = num_channels _UpperCamelCase : List[Any] = embed_dim _UpperCamelCase : Tuple = hidden_sizes _UpperCamelCase : int = depths _UpperCamelCase : int = num_heads _UpperCamelCase : Optional[int] = window_size _UpperCamelCase : List[Any] = mlp_ratio _UpperCamelCase : Optional[int] = qkv_bias _UpperCamelCase : str = hidden_dropout_prob _UpperCamelCase : Any = attention_probs_dropout_prob _UpperCamelCase : str = drop_path_rate _UpperCamelCase : Union[str, Any] = hidden_act _UpperCamelCase : Union[str, Any] = use_absolute_embeddings _UpperCamelCase : Optional[Any] = patch_norm _UpperCamelCase : Tuple = layer_norm_eps _UpperCamelCase : Tuple = initializer_range _UpperCamelCase : str = is_training _UpperCamelCase : int = scope _UpperCamelCase : Union[str, Any] = use_labels _UpperCamelCase : str = type_sequence_label_size _UpperCamelCase : Any = encoder_stride _UpperCamelCase : str = out_features _UpperCamelCase : Optional[Any] = out_indices def A__ ( self): _UpperCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _UpperCamelCase : List[str] = None if self.use_labels: _UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCamelCase : Union[str, Any] = self.get_config() return config, pixel_values, labels def A__ ( self): return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , 
use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Optional[Any] = FocalNetModel(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Union[str, Any] = model(__snake_case) _UpperCamelCase : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _UpperCamelCase : Any = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Union[str, Any] = FocalNetBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : str = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1]) # verify backbone works with out_features=None _UpperCamelCase : List[Any] = None _UpperCamelCase : Optional[Any] = FocalNetBackbone(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Tuple = model(__snake_case) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : List[Any] = FocalNetForMaskedImageModeling(config=__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Any = model(__snake_case) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images _UpperCamelCase : Optional[int] = 1 _UpperCamelCase : Any = FocalNetForMaskedImageModeling(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _UpperCamelCase : str = model(__snake_case) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def A__ ( self , __snake_case , __snake_case , __snake_case): _UpperCamelCase : int = self.type_sequence_label_size _UpperCamelCase : Dict = FocalNetForImageClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Tuple = model(__snake_case , labels=__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images _UpperCamelCase : Dict = 1 _UpperCamelCase : Optional[int] = FocalNetForImageClassification(__snake_case) model.to(__snake_case) model.eval() _UpperCamelCase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _UpperCamelCase : Optional[int] = model(__snake_case) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def A__ ( self): _UpperCamelCase : Any = 
self.prepare_config_and_inputs() _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Union[str, Any] = config_and_inputs _UpperCamelCase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( _lowercase , _lowercase , unittest.TestCase ): """simple docstring""" a__ = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) a__ = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) a__ = False a__ = False a__ = False a__ = False a__ = False def A__ ( self): _UpperCamelCase : Optional[int] = FocalNetModelTester(self) _UpperCamelCase : Tuple = ConfigTester(self , config_class=__snake_case , embed_dim=37 , has_text_modality=__snake_case) def A__ ( self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def A__ ( self): return def A__ ( self): _UpperCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case) def A__ ( self): _UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__snake_case) def A__ ( self): _UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__snake_case) def A__ ( self): _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__snake_case) @unittest.skip(reason='FocalNet does not use inputs_embeds') def A__ ( self): pass @unittest.skip(reason='FocalNet does not use feedforward chunking') def A__ ( self): pass def A__ ( self): _UpperCamelCase , _UpperCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _UpperCamelCase : int = model_class(__snake_case) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _UpperCamelCase : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__snake_case , nn.Linear)) def A__ ( self): _UpperCamelCase , _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _UpperCamelCase : Tuple = model_class(__snake_case) _UpperCamelCase : str = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCamelCase : str = [*signature.parameters.keys()] _UpperCamelCase : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __snake_case) def A__ ( self , __snake_case , __snake_case , __snake_case , __snake_case): _UpperCamelCase : Union[str, Any] = model_class(__snake_case) model.to(__snake_case) model.eval() with torch.no_grad(): _UpperCamelCase : Any = model(**self._prepare_for_class(__snake_case , __snake_case)) _UpperCamelCase : Tuple = outputs.hidden_states _UpperCamelCase : Dict = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1) self.assertEqual(len(__snake_case) , __snake_case) # FocalNet has a different seq_length 
_UpperCamelCase : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _UpperCamelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) _UpperCamelCase : List[str] = outputs.reshaped_hidden_states self.assertEqual(len(__snake_case) , __snake_case) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Union[str, Any] = reshaped_hidden_states[0].shape _UpperCamelCase : int = ( reshaped_hidden_states[0].view(__snake_case , __snake_case , height * width).permute(0 , 2 , 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def A__ ( self): _UpperCamelCase , _UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _UpperCamelCase : List[str] = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase : str = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , __snake_case) def A__ ( self): _UpperCamelCase , _UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : str = 3 _UpperCamelCase : Optional[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _UpperCamelCase : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _UpperCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _UpperCamelCase : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _UpperCamelCase : List[str] = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCamelCase : List[Any] = True self.check_hidden_states_output(__snake_case , __snake_case , __snake_case , (padded_height, padded_width)) @slow def A__ ( self): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCamelCase : int = FocalNetModel.from_pretrained(__snake_case) self.assertIsNotNone(__snake_case) def A__ ( self): _UpperCamelCase , _UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() _UpperCamelCase : List[str] = _config_zero_init(__snake_case) for model_class in self.all_model_classes: _UpperCamelCase : Dict = model_class(config=__snake_case) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , ) @require_vision @require_torch class lowercase ( unittest.TestCase ): """simple docstring""" @cached_property def A__ ( self): # TODO update 
organization return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny') if is_vision_available() else None @slow def A__ ( self): _UpperCamelCase : int = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny').to(__snake_case) _UpperCamelCase : List[str] = self.default_image_processor _UpperCamelCase : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') _UpperCamelCase : Union[str, Any] = image_processor(images=__snake_case , return_tensors='pt').to(__snake_case) # forward pass with torch.no_grad(): _UpperCamelCase : Optional[int] = model(**__snake_case) # verify the logits _UpperCamelCase : Union[str, Any] = torch.Size((1, 10_00)) self.assertEqual(outputs.logits.shape , __snake_case) _UpperCamelCase : Tuple = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1]).to(__snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __snake_case , atol=1e-4)) self.assertEqual(outputs.logits.argmax(dim=-1).item() , 2_81) @require_torch class lowercase ( _lowercase , unittest.TestCase ): """simple docstring""" a__ = (FocalNetBackbone,) if is_torch_available() else () a__ = FocalNetConfig a__ = False def A__ ( self): _UpperCamelCase : List[Any] = FocalNetModelTester(self)
648
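A quick standalone check of the patch-grid arithmetic the FocalNet shape tests above rely on; the sizes here are illustrative assumptions rather than values from the test suite:

image_size, patch_size = (21, 21), (4, 4)  # hypothetical sizes for illustration

# One patch token per (patch_size x patch_size) tile, as in the hidden-state checks above.
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

# Pad-to-a-multiple rule used by the padded-input test above.
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])

assert num_patches == 25    # a 5 x 5 grid of 4 x 4 patches
assert padded_height == 24  # 21 rounded up to the next multiple of 4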
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = """▁""" lowerCAmelCase__ = {"""vocab_file""": """sentencepiece.bpe.model"""} lowerCAmelCase__ = { """vocab_file""": { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model""" ), } } lowerCAmelCase__ = { """xlm-roberta-base""": 5_1_2, """xlm-roberta-large""": 5_1_2, """xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2, """xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2, """xlm-roberta-large-finetuned-conll03-english""": 5_1_2, """xlm-roberta-large-finetuned-conll03-german""": 5_1_2, } class lowercase ( _lowercase ): """simple docstring""" a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ["input_ids", "attention_mask"] def __init__( self , __snake_case , __snake_case="<s>" , __snake_case="</s>" , __snake_case="</s>" , __snake_case="<s>" , __snake_case="<unk>" , __snake_case="<pad>" , __snake_case="<mask>" , __snake_case = None , **__snake_case , ): # Mask token behaves like a normal word, i.e. includes the space before it _UpperCamelCase : Dict = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case) if isinstance(__snake_case , __snake_case) else mask_token _UpperCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , ) _UpperCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(__snake_case)) _UpperCamelCase : Dict = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.'
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 tokens _UpperCamelCase : int = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCamelCase : List[Any] = 1 _UpperCamelCase : Any = len(self.sp_model) + self.fairseq_offset _UpperCamelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self): _UpperCamelCase : List[Any] = self.__dict__.copy() _UpperCamelCase : Optional[Any] = None _UpperCamelCase : Any = self.sp_model.serialized_model_proto() return state def __setstate__( self , __snake_case): _UpperCamelCase : int = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): _UpperCamelCase : Tuple = {} _UpperCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def A__ ( self , __snake_case , __snake_case = None): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCamelCase : Tuple = [self.cls_token_id] _UpperCamelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A__ ( self , __snake_case , __snake_case = None , __snake_case = False): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case) if token_ids_a is None: return [1] + ([0] * len(__snake_case)) + [1] return [1] + ([0] * len(__snake_case)) + [1, 1] + ([0] * len(__snake_case)) + [1] def A__ ( self , __snake_case , __snake_case = None): _UpperCamelCase : Optional[Any] = [self.sep_token_id] _UpperCamelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] @property def A__ ( self): return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token def A__ ( self): _UpperCamelCase : List[str] = {self.convert_ids_to_tokens(__snake_case): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def A__ ( self , __snake_case): return self.sp_model.encode(__snake_case , out_type=__snake_case) def A__ ( self , __snake_case): if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCamelCase : str = self.sp_model.PieceToId(__snake_case) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A__ ( self , __snake_case): if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset) def A__ ( self , __snake_case): _UpperCamelCase : Optional[int] = ''.join(__snake_case).replace(__snake_case , ' ').strip() return out_string def A__ ( self , __snake_case , __snake_case = None): if not os.path.isdir(__snake_case): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return _UpperCamelCase : str = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(__snake_case) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , __snake_case) elif not os.path.isfile(self.vocab_file): with open(__snake_case , 'wb') as fi: _UpperCamelCase : Any = self.sp_model.serialized_model_proto() fi.write(__snake_case) 
return (out_vocab_file,)
648
1
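The fairseq/SentencePiece alignment table in the tokenizer above is easier to see with the offset written out. A minimal sketch of the same ID mapping, using a toy piece-to-id table in place of a real SentencePiece model (the table values are illustrative assumptions):

# Toy stand-in for sp_model.PieceToId; a real model reserves id 0 for <unk>.
spm_piece_to_id = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3, ".": 4}

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # fairseq inserts <pad> at id 1, shifting every spm piece by one

def token_to_id(token):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm returns 0 for unknown pieces; that must map to fairseq's <unk> (id 3)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]

assert token_to_id(",") == 4      # spm id 3 + offset 1, matching the comment table
assert token_to_id("<pad>") == 1  # control token served from the fairseq map
assert token_to_id("xyz") == 3    # unknown piece falls back to <unk>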
from __future__ import annotations lowerCAmelCase__ = { """A""": ["""B""", """C""", """E"""], """B""": ["""A""", """D""", """E"""], """C""": ["""A""", """F""", """G"""], """D""": ["""B"""], """E""": ["""A""", """B""", """D"""], """F""": ["""C"""], """G""": ["""C"""], } class lowercase : """simple docstring""" def __init__( self , __snake_case , __snake_case): _UpperCamelCase : Tuple = graph # mapping node to its parent in resulting breadth first tree _UpperCamelCase : dict[str, str | None] = {} _UpperCamelCase : Any = source_vertex def A__ ( self): _UpperCamelCase : Tuple = {self.source_vertex} _UpperCamelCase : List[str] = None _UpperCamelCase : Union[str, Any] = [self.source_vertex] # first in first out queue while queue: _UpperCamelCase : Any = queue.pop(0) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(__snake_case) _UpperCamelCase : Any = vertex queue.append(__snake_case) def A__ ( self , __snake_case): if target_vertex == self.source_vertex: return self.source_vertex _UpperCamelCase : Optional[Any] = self.parent.get(__snake_case) if target_vertex_parent is None: _UpperCamelCase : Optional[int] = ( f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}''' ) raise ValueError(__snake_case) return self.shortest_path(__snake_case) + f'''->{target_vertex}''' if __name__ == "__main__": lowerCAmelCase__ = Graph(graph, """G""") g.breadth_first_search() print(g.shortest_path("""D""")) print(g.shortest_path("""G""")) print(g.shortest_path("""Foo"""))
648
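The recursive shortest_path method above reconstructs the route by walking the parent map from target back to source. An iterative sketch of the same backtracking, included only to make the parent-map invariant explicit:

def backtrack_path(parent, source, target):
    # parent[v] is the vertex from which the breadth-first search first reached v.
    path = [target]
    while path[-1] != source:
        prev = parent.get(path[-1])
        if prev is None:
            raise ValueError(f"No path from vertex: {source} to vertex: {target}")
        path.append(prev)
    return "->".join(reversed(path))

# Toy parent map, as a BFS from "A" over edges A-B and B-D would produce it.
assert backtrack_path({"B": "A", "D": "B"}, "A", "D") == "A->B->D"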
from ...processing_utils import ProcessorMixin class lowercase ( _lowercase ): """simple docstring""" a__ = ["image_processor", "feature_extractor"] a__ = "TvltImageProcessor" a__ = "TvltFeatureExtractor" def __init__( self , __snake_case , __snake_case): super().__init__(image_processor=__snake_case , feature_extractor=__snake_case) _UpperCamelCase : List[str] = image_processor _UpperCamelCase : Dict = feature_extractor def __call__( self , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=None , __snake_case=False , __snake_case=False , *__snake_case , **__snake_case , ): if images is None and audio is None: raise ValueError('You need to specify either an `images` or `audio` input to process.') _UpperCamelCase : Union[str, Any] = None if images is not None: _UpperCamelCase : Tuple = self.image_processor(__snake_case , mask_pixel=__snake_case , *__snake_case , **__snake_case) if images_mixed is not None: _UpperCamelCase : Union[str, Any] = self.image_processor(__snake_case , is_mixed=__snake_case , *__snake_case , **__snake_case) if audio is not None: _UpperCamelCase : Tuple = self.feature_extractor( __snake_case , *__snake_case , sampling_rate=__snake_case , mask_audio=__snake_case , **__snake_case) _UpperCamelCase : Tuple = {} if audio is not None: output_dict.update(__snake_case) if images is not None: output_dict.update(__snake_case) if images_mixed_dict is not None: output_dict.update(__snake_case) return output_dict @property def A__ ( self): _UpperCamelCase : List[Any] = self.image_processor.model_input_names _UpperCamelCase : List[Any] = self.feature_extractor.model_input_names return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
648
1
from typing import Union import fire import torch from tqdm import tqdm def lowerCamelCase_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str = "cpu" , UpperCAmelCase_ : Union[str, None] = None ) -> None: '''simple docstring''' _UpperCamelCase : Tuple = torch.load(UpperCAmelCase_ , map_location=UpperCAmelCase_ ) for k, v in tqdm(state_dict.items() ): if not isinstance(UpperCAmelCase_ , torch.Tensor ): raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' ) _UpperCamelCase : Dict = v.half() if save_path is None: # overwrite src_path _UpperCamelCase : str = src_path torch.save(UpperCAmelCase_ , UpperCAmelCase_ ) if __name__ == "__main__": fire.Fire(convert)
648
from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""", """RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""", """RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""", """RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""", """RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""", """RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""", """RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""", """RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""", """RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""", """RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""", } class lowercase ( _lowercase ): """simple docstring""" a__ = "rwkv" a__ = {"max_position_embeddings": "context_length"} def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ): _UpperCamelCase : str = vocab_size _UpperCamelCase : int = context_length _UpperCamelCase : Tuple = hidden_size _UpperCamelCase : Tuple = num_hidden_layers _UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size _UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size _UpperCamelCase : Union[str, Any] = layer_norm_epsilon _UpperCamelCase : Dict = rescale_every _UpperCamelCase : Optional[Any] = use_cache _UpperCamelCase : str = bos_token_id _UpperCamelCase : Optional[Any] = eos_token_id super().__init__( tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
648
1
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING import torch from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class lowercase ( _lowercase ): """simple docstring""" a__ = "dandelin/vilt-b32-finetuned-vqa" a__ = ( "This is a tool that answers a question about an image. It takes an input named `image` which should be the " "image containing the information, as well as a `question` which should be the question in English. It " "returns a text that is the answer to the question." ) a__ = "image_qa" a__ = AutoProcessor a__ = AutoModelForVisualQuestionAnswering a__ = ["image", "text"] a__ = ["text"] def __init__( self , *__snake_case , **__snake_case): requires_backends(self , ['vision']) super().__init__(*__snake_case , **__snake_case) def A__ ( self , __snake_case , __snake_case): return self.pre_processor(__snake_case , __snake_case , return_tensors='pt') def A__ ( self , __snake_case): with torch.no_grad(): return self.model(**__snake_case).logits def A__ ( self , __snake_case): _UpperCamelCase : Tuple = outputs.argmax(-1).item() return self.model.config.idalabel[idx]
648
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""", """bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""", """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""", """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""", """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""", """bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""", """cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""", """cl-tohoku/bert-base-japanese-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json""" ), """wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""", # See all BERT models at https://huggingface.co/models?filter=bert } class lowercase ( _lowercase ): """simple docstring""" a__ = "bert" def __init__( self , __snake_case=3_05_22 , __snake_case=7_68 , __snake_case=12 , __snake_case=12 , __snake_case=30_72 , __snake_case="gelu" , __snake_case=0.1 , __snake_case=0.1 , __snake_case=5_12 , __snake_case=2 , __snake_case=0.0_2 , __snake_case=1e-12 , __snake_case=0 , __snake_case="absolute" , __snake_case=True , __snake_case=None , 
**__snake_case , ): super().__init__(pad_token_id=__snake_case , **__snake_case) _UpperCamelCase : int = vocab_size _UpperCamelCase : Optional[Any] = hidden_size _UpperCamelCase : Optional[Any] = num_hidden_layers _UpperCamelCase : List[str] = num_attention_heads _UpperCamelCase : int = hidden_act _UpperCamelCase : Optional[Any] = intermediate_size _UpperCamelCase : Union[str, Any] = hidden_dropout_prob _UpperCamelCase : Tuple = attention_probs_dropout_prob _UpperCamelCase : Optional[int] = max_position_embeddings _UpperCamelCase : str = type_vocab_size _UpperCamelCase : Optional[Any] = initializer_range _UpperCamelCase : List[str] = layer_norm_eps _UpperCamelCase : Any = position_embedding_type _UpperCamelCase : Any = use_cache _UpperCamelCase : Any = classifier_dropout class lowercase ( _lowercase ): """simple docstring""" @property def A__ ( self): if self.task == "multiple-choice": _UpperCamelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _UpperCamelCase : Optional[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ])
648
1
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness lowerCAmelCase__ = """\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } """ lowerCAmelCase__ = """\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). """ lowerCAmelCase__ = """ Estimates the pass@k metric for code generation: the probability that at least one of k code candidates passes its unit tests Args: predictions: list of candidates to evaluate. Each prediction should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the candidate programs (Default: 4). timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0). Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric(\"code_eval\") >>> test_cases = [\"assert add(2,3)==5\"] >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {'pass@1': 0.5, 'pass@2': 1.0} """ lowerCAmelCase__ = """ ################################################################################ !!!WARNING!!! ################################################################################ The \"code_eval\" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper \"Evaluating Large Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this with: >>> import os >>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\" ################################################################################\ """ lowerCAmelCase__ = """The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase ( datasets.Metric ): """simple docstring""" def A__ ( self): return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string')), 'references': datasets.Value('string'), }) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , ) def A__ ( self , __snake_case , __snake_case , __snake_case=[1, 10, 1_00] , __snake_case=4 , __snake_case=3.0): if os.getenv('HF_ALLOW_CODE_EVAL' , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError('This metric is currently not supported on Windows.') with ThreadPoolExecutor(max_workers=__snake_case) as executor: _UpperCamelCase : int = [] _UpperCamelCase : Tuple = Counter() _UpperCamelCase : List[str] = 0 _UpperCamelCase : Optional[int] = defaultdict(__snake_case) for task_id, (candidates, test_case) in enumerate(zip(__snake_case , __snake_case)): for candidate in candidates: _UpperCamelCase : str = candidate + '\n' + test_case _UpperCamelCase : List[Any] = (test_program, timeout, task_id, completion_id[task_id]) _UpperCamelCase : List[Any] = executor.submit(__snake_case , *__snake_case) futures.append(__snake_case) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__snake_case): _UpperCamelCase : Optional[Any] = future.result() results[result["task_id"]].append((result['completion_id'], result)) _UpperCamelCase , _UpperCamelCase : Optional[Any] = [], [] for result in results.values(): result.sort() _UpperCamelCase : List[Any] = [r[1]['passed'] for r in result] total.append(len(__snake_case)) correct.append(sum(__snake_case)) _UpperCamelCase : List[Any] = np.array(__snake_case) _UpperCamelCase : List[str] = np.array(__snake_case) _UpperCamelCase : List[str] = k _UpperCamelCase : Optional[int] = 
{f'''pass@{k}''': estimate_pass_at_k(__snake_case , __snake_case , __snake_case).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowerCamelCase_ ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict ) -> int: '''simple docstring''' def estimator(UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): _UpperCamelCase : List[Any] = itertools.repeat(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) else: assert len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) _UpperCamelCase : Any = iter(UpperCAmelCase_ ) return np.array([estimator(int(UpperCAmelCase_ ) , int(UpperCAmelCase_ ) , UpperCAmelCase_ ) for n, c in zip(UpperCAmelCase_ , UpperCAmelCase_ )] )
648
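The estimator at the end of this metric is the unbiased pass@k from the paper cited above: 1 - C(n-c, k) / C(n, k), written as a running product so it stays numerically stable for large n. A quick standalone sanity check of that identity (a sketch, separate from the metric code):

import numpy as np
from math import comb

def pass_at_k(n, c, k):
    # n samples, c of which pass: probability that at least one of k draws passes.
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# n=5, c=2, k=2: 1 - C(3, 2) / C(5, 2) = 1 - 3/10 = 0.7
assert abs(pass_at_k(5, 2, 2) - (1 - comb(3, 2) / comb(5, 2))) < 1e-12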
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class lowercase ( _lowercase ): """simple docstring""" a__ = "facebook/bart-large-mnli" a__ = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a__ = "text_classifier" a__ = AutoTokenizer a__ = AutoModelForSequenceClassification a__ = ["text", ["text"]] a__ = ["text"] def A__ ( self): super().setup() _UpperCamelCase : List[Any] = self.model.config _UpperCamelCase : Optional[int] = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('entail'): _UpperCamelCase : Tuple = int(__snake_case) if self.entailment_id == -1: raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.') def A__ ( self , __snake_case , __snake_case): _UpperCamelCase : List[Any] = labels return self.pre_processor( [text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , ) def A__ ( self , __snake_case): _UpperCamelCase : str = outputs.logits _UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
648
1
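The encode step in the tool above uses the standard NLI-based zero-shot trick: the input text becomes the premise and every candidate label is wrapped into a hypothesis, so the entailment logit ranks the labels. A sketch of the pairing alone, with illustrative texts:

text = "I loved this movie"
labels = ["positive", "negative", "neutral"]

# One (premise, hypothesis) pair per candidate label, exactly as encode()
# builds them before batching through the tokenizer.
pairs = [(text, f"This example is {label}") for label in labels]

assert pairs[0] == ("I loved this movie", "This example is positive")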
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase__ = 1_6 lowerCAmelCase__ = 3_2 def lowerCamelCase_ ( UpperCAmelCase_ : Accelerator , UpperCAmelCase_ : int = 1_6 ) -> Union[str, Any]: '''simple docstring''' _UpperCamelCase : Optional[int] = AutoTokenizer.from_pretrained('bert-base-cased' ) _UpperCamelCase : Tuple = load_dataset('glue' , 'mrpc' ) def tokenize_function(UpperCAmelCase_ : Optional[int] ): # max_length=None => use the model max length (it's actually the default) _UpperCamelCase : List[Any] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCamelCase : Any = datasets.map( UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCamelCase : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(UpperCAmelCase_ : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCamelCase : Any = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCamelCase : Union[str, Any] = 1_6 elif accelerator.mixed_precision != "no": _UpperCamelCase : Dict = 8 else: _UpperCamelCase : Optional[Any] = None return tokenizer.pad( UpperCAmelCase_ , padding='longest' , max_length=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_tensors='pt' , ) # Instantiate dataloaders. 
_UpperCamelCase : int = DataLoader( tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ ) _UpperCamelCase : Any = DataLoader( tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders lowerCAmelCase__ = mocked_dataloaders # noqa: F811 def lowerCamelCase_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] ) -> str: '''simple docstring''' if os.environ.get('TESTING_MOCKED_DATALOADERS' , UpperCAmelCase_ ) == "1": _UpperCamelCase : int = 2 # New Code # _UpperCamelCase : Any = int(args.gradient_accumulation_steps ) # Initialize accelerator _UpperCamelCase : List[Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=UpperCAmelCase_ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( 'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCamelCase : List[str] = config['lr'] _UpperCamelCase : str = int(config['num_epochs'] ) _UpperCamelCase : Optional[Any] = int(config['seed'] ) _UpperCamelCase : List[Any] = int(config['batch_size'] ) _UpperCamelCase : str = evaluate.load('glue' , 'mrpc' ) set_seed(UpperCAmelCase_ ) _UpperCamelCase , _UpperCamelCase : Optional[int] = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCamelCase : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=UpperCAmelCase_ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCamelCase : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer _UpperCamelCase : Optional[Any] = AdamW(params=model.parameters() , lr=UpperCAmelCase_ ) # Instantiate scheduler _UpperCamelCase : Tuple = get_linear_schedule_with_warmup( optimizer=UpperCAmelCase_ , num_warmup_steps=1_0_0 , num_training_steps=(len(UpperCAmelCase_ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Tuple = accelerator.prepare( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # Now we train the model for epoch in range(UpperCAmelCase_ ): model.train() for step, batch in enumerate(UpperCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(UpperCAmelCase_ ): _UpperCamelCase : Any = model(**UpperCAmelCase_ ) _UpperCamelCase : List[Any] = output.loss accelerator.backward(UpperCAmelCase_ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCAmelCase_ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _UpperCamelCase : Dict = model(**UpperCAmelCase_ ) _UpperCamelCase : Tuple = outputs.logits.argmax(dim=-1 ) _UpperCamelCase , _UpperCamelCase : int = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , ) _UpperCamelCase : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , UpperCAmelCase_ ) def lowerCamelCase_ ( ) -> Tuple: '''simple docstring''' _UpperCamelCase : Dict = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose ' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 ' 'and an Nvidia Ampere GPU.' , ) # New Code # parser.add_argument( '--gradient_accumulation_steps' , type=UpperCAmelCase_ , default=1 , help='The number of minibatches to be run before gradients are accumulated.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) _UpperCamelCase : Dict = parser.parse_args() _UpperCamelCase : Any = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6} training_function(UpperCAmelCase_ , UpperCAmelCase_ ) if __name__ == "__main__": main()
648
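For intuition, here is roughly what the accelerator.accumulate context manager in the training loop above saves you from writing by hand. This is a sketch of the idea in plain PyTorch with a toy model, not Accelerate's implementation:

import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
accumulation_steps = 4  # plays the role of gradient_accumulation_steps above

for step in range(16):
    inputs, targets = torch.randn(8, 4), torch.randn(8, 2)
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    # Scale the loss so the summed gradients match one large-batch backward pass.
    (loss / accumulation_steps).backward()
    if (step + 1) % accumulation_steps == 0:  # sync step: update, then reset
        optimizer.step()
        optimizer.zero_grad()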
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) lowerCAmelCase__ = { """configuration_blip""": [ """BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BlipConfig""", """BlipTextConfig""", """BlipVisionConfig""", ], """processing_blip""": ["""BlipProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ["""BlipImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """BlipModel""", """BlipPreTrainedModel""", """BlipForConditionalGeneration""", """BlipForQuestionAnswering""", """BlipVisionModel""", """BlipTextModel""", """BlipForImageTextRetrieval""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ """TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFBlipModel""", """TFBlipPreTrainedModel""", """TFBlipForConditionalGeneration""", """TFBlipForQuestionAnswering""", """TFBlipVisionModel""", """TFBlipTextModel""", """TFBlipForImageTextRetrieval""", ] if TYPE_CHECKING: from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig from .processing_blip import BlipProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_blip import BlipImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip import ( BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, BlipForConditionalGeneration, BlipForImageTextRetrieval, BlipForQuestionAnswering, BlipModel, BlipPreTrainedModel, BlipTextModel, BlipVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blip import ( TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFBlipForConditionalGeneration, TFBlipForImageTextRetrieval, TFBlipForQuestionAnswering, TFBlipModel, TFBlipPreTrainedModel, TFBlipTextModel, TFBlipVisionModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
648
1
def lowerCamelCase_ ( UpperCAmelCase_ : int=2_8_1_2_3 ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase : Tuple = [1] * (limit + 1) for i in range(2 , int(limit**0.5 ) + 1 ): sum_divs[i * i] += i for k in range(i + 1 , limit // i + 1 ): sum_divs[k * i] += k + i _UpperCamelCase : List[Any] = set() _UpperCamelCase : Tuple = 0 for n in range(1 , limit + 1 ): if sum_divs[n] > n: abundants.add(UpperCAmelCase_ ) if not any((n - a in abundants) for a in abundants ): res += n return res if __name__ == "__main__": print(solution())
648
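The sieve above fills proper-divisor sums for every n up to the limit in roughly O(n log n): each pair (i, k) with i < k adds i + k to the product k*i, and perfect squares add their repeated divisor once. A small standalone check of the same sieve with an assumed limit of 30:

def proper_divisor_sums(limit):
    sums = [1] * (limit + 1)  # every m > 1 has the proper divisor 1
    for i in range(2, int(limit**0.5) + 1):
        sums[i * i] += i  # square: the divisor i pairs with itself, count it once
        for k in range(i + 1, limit // i + 1):
            sums[k * i] += k + i  # distinct divisor pair (i, k)
    return sums

sums = proper_divisor_sums(30)
assert sums[12] == 1 + 2 + 3 + 4 + 6  # 16 > 12, so 12 is abundant
assert sums[28] == 28                 # 28 is perfect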
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
648
1
from typing import TYPE_CHECKING from ...utils import _LazyModule lowerCAmelCase__ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]} if TYPE_CHECKING: from .tokenization_byta import ByTaTokenizer else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
648
import unittest
from pathlib import Path
from shutil import copyfile

from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10


@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],  # noqa: E501
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],  # noqa: E501
        )
        # fmt: on

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        # Expected ids for three sentences, right-padded to length 178 with the pad
        # token id (1); the padding/mask runs are written as list arithmetic for brevity.
        expected_encoding = {
            "input_ids": [
                [3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2],  # noqa: E501
                [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2] + [1] * 105,  # noqa: E501
                [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2] + [1] * 154,  # noqa: E501
            ],
            "attention_mask": [
                [1] * 178,
                [1] * 73 + [0] * 105,
                [1] * 24 + [0] * 154,
            ],
        }
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )


@require_sentencepiece
class SpeechToTextTokenizerMultilingualTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
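# --- Added example (illustrative sketch, not part of the original tests) ---
# Mirrors what the multilingual test class above asserts: setting `tgt_lang`
# makes the tokenizer prefix the language code and append EOS. Loading the
# checkpoint requires network access.
#
#   tokenizer = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
#   tokenizer.tgt_lang = "fr"
#   ids = tokenizer("C'est trop cool").input_ids
#   assert ids[0] == tokenizer.lang_code_to_id["fr"]
#   assert ids[-1] == tokenizer.eos_token_id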
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    """Copy the generator weights from the original checkpoint into the HF model.

    The target attribute names (conv_pre, upsampler, resblocks, conv_post) are
    those of transformers' SpeechT5HifiGan module.
    """
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # the stats file holds the mean and scale used to (de)normalize spectrograms
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
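# --- Added example (illustrative sketch): a typical invocation. The file names
# below are hypothetical placeholders, not artifacts shipped with this script.
#
#   python convert_hifigan.py \
#       --checkpoint_path ./hifigan_generator.ckpt \
#       --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan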
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A BERT-style configuration with additional parameters for masking/pruning."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
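# --- Added example (illustrative sketch, not part of the original module) ---
# A MaskedBertConfig behaves like a standard BERT config plus the three
# pruning-specific fields (pruning_method, mask_init, mask_scale):
if __name__ == "__main__":
    config = MaskedBertConfig(pruning_method="topK", mask_scale=0.0)
    print(config.model_type)  # masked_bert
    print(config.pruning_method, config.mask_init, config.mask_scale)  # topK constant 0.0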
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's Easter algorithm
    (the computus)."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
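# --- Added check (sketch): the computus above reproduces known Easter dates,
# for example April 4, 2021 and April 9, 2023:
#
#   assert gauss_easter(2021) == datetime(2021, 4, 4)
#   assert gauss_easter(2023) == datetime(2023, 4, 9)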
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
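# --- Added note (sketch): why the assertions above should hold. Both schedulers
# are built from the same linear beta schedule, so with matching beta_start,
# beta_end and num_train_timesteps their `add_noise` methods reduce to the same
# closed form,
#
#     x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
#
# and the two training loops therefore see identical noisy inputs and losses.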
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """
    A number is a Krishnamurthy number (also called a factorion) if it equals
    the sum of the factorials of its digits, e.g. 145 = 1! + 4! + 5!.
    """
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
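# --- Added check (sketch): 145 and 40585 are the classic base-10 examples,
# since 1! + 4! + 5! = 145 and 4! + 0! + 5! + 8! + 5! = 40585:
#
#   assert krishnamurthy(145)
#   assert krishnamurthy(40585)
#   assert not krishnamurthy(146)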
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)


TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()

    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)