Dataset columns (each row below appears in this order: code, code_codestyle, style_context, style_context_codestyle, label):

  code                     string   (length 81 to 54k)
  code_codestyle           int64    (0 to 721)
  style_context            string   (length 91 to 41.9k)
  style_context_codestyle  int64    (0 to 699)
  label                    int64    (0 to 1)
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image, candidate_labels=None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
712
from __future__ import annotations

import unittest

from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFFunnelBaseModel,
        TFFunnelForMaskedLM,
        TFFunnelForMultipleChoice,
        TFFunnelForPreTraining,
        TFFunnelForQuestionAnswering,
        TFFunnelForSequenceClassification,
        TFFunnelForTokenClassification,
        TFFunnelModel,
    )


class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=5_12,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
642
0
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum of the first n
    natural numbers and the sum of their squares (Project Euler problem 6).
    Note: the two closed-form expressions below were originally bound to
    swapped names; they are assigned to the correct names here."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
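# Quick hand-checked example (not from the dataset): for n = 10 the square of the
# sum is 55**2 = 3025 and the sum of squares is 385, so the difference is 2640.
assert solution(10) == 2640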
713
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Compute the fixed monthly payment on an amortized loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
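# Hedged usage sketch (values are illustrative, not from the dataset): a 25,000
# loan at 8% nominal annual interest repaid over 10 years comes out to roughly
# 303 per month with the formula above.
payment = equated_monthly_installments(25_000, 0.08, 10)
print(f"Monthly payment: {payment:.2f}")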
642
0
from datetime import datetime

import requests
from bs4 import BeautifulSoup  # the original "from bsa import BeautifulSoup" was a typo: the package is bs4

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
714
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Find the smallest non-negative n with n % n1 == r1 and n % n2 == r2,
    assuming n1 and n2 are coprime."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
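# Hand-checked example (not from the dataset): find n with n % 5 == 1 and
# n % 7 == 3. The answer is 31, since 31 = 6*5 + 1 = 4*7 + 3.
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31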
642
0
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image

from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 1_00

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 10_00,
            "beta_schedule": "linear",
            "beta_start": 0.0_00_85,
            "beta_end": 0.0_12,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((2_56, 2_56))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54_98_50_34, 0.55_50_93_65, 0.52_56_15_04, 0.5_57_04_94, 0.5_59_38_18, 0.5_26_39_79, 0.50_28_56_43, 0.5_06_98_46, 0.51_19_67_36]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        init_image = init_image.resize((5_12, 5_12))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 2_55.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=1_00,
            height=5_12,
            width=5_12,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (5_12, 5_12, 3)

        assert_mean_pixel_difference(image, expected_image)
715
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=25_01_12,
        d_model=5_12,
        d_kv=64,
        d_ff=10_24,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=1_28,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
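# Hedged usage sketch: building a small config by overriding a few defaults.
# The override values are illustrative, not taken from any released checkpoint.
config = UMT5Config(d_model=2_56, num_layers=4, num_heads=4)
print(config.hidden_size)         # 256: aliased to d_model via the property
print(config.num_decoder_layers)  # 4: defaults to num_layers when not set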
642
0
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE with the modified Euler (Heun) predictor-corrector method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # predictor: a plain Euler step
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
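# Hedged usage sketch: integrate y' = y from x = 0 to 1 with y(0) = 1.
# Heun's method is second order, so the last entry should be close to e ≈ 2.71828;
# with step size 0.1 the result works out to roughly 2.714.
approx = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
print(approx[-1])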
716
import pickle
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 10_08)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_08)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 3_12_27, 44_47, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]],
            # the three attention masks are all-ones lists matching the input lengths
            "attention_mask": [[1] * 98, [1] * 35, [1] * 14],
        }  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
642
0
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum of the first n
    natural numbers and the sum of their squares (same fix as the earlier copy
    of this row: the two bindings are restored to their correct names)."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
717
import argparse
import hashlib  # hashlib is only used inside the Test class
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
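# Quick sanity check of the class above (not from the dataset): the SHA-1 of the
# empty bytestring is a well-known constant.
assert SHA1Hash(b"").final_hash() == "da39a3ee5e6b4b0d3255bfef95601890afd80709"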
642
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=2_24,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
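# Hedged usage sketch: the default config gives hidden_size = 96 * 2**3 = 768,
# the channel width after the last of the four Swin stages.
config = DonutSwinConfig()
print(config.hidden_size)        # 768
print(config.num_hidden_layers)  # 4, via the attribute_map alias for num_layers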
718
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 10_24,
    "facebook/bart-large": 10_24,
    "facebook/bart-large-mnli": 10_24,
    "facebook/bart-large-cnn": 10_24,
    "facebook/bart-large-xsum": 10_24,
    "yjernite/bart_eli5": 10_24,
}


class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
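# Hedged usage sketch (requires network access to download the checkpoint): the
# fast tokenizer wraps the same vocab/merges as the slow BartTokenizer, and
# encode() surrounds the sequence with <s> ... </s> per the method above.
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
print(tok("Hello world")["input_ids"])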
642
0
from typing import Dict, List, Optional, Type

from .. import config
from ..utils import logging
from .formatting import (
    ArrowFormatter,
    CustomFormatter,
    Formatter,
    PandasFormatter,
    PythonFormatter,
    TensorFormatter,
    format_table,
    query_table,
)
from .np_formatter import NumpyFormatter


logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter class under a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an error to raise when an uninstalled backend's format type is requested."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error


# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")

if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])

if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])

if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])


def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Instantiate a formatter for the given format_type, or raise the stored error if its backend is unavailable."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
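# Hedged usage sketch: resolve an alias to its canonical format name and build a
# formatter from it. "np" is registered above as an alias of "numpy".
assert get_format_type_from_alias("np") == "numpy"
formatter = get_formatter("np")  # returns a NumpyFormatter instance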
719
import torch

from diffusers import DDIMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [1_00, 5_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1], [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 5_00]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_20, 4_00) - 0.1_47_71)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_80, 9_60) - 0.3_24_60)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_87, 4_86) - 0.0_09_79)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_99, 9_98) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 11_47.79_04) < 1e-2
        assert abs(result_mean.item() - 0.49_82) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_72.00_67) < 1e-2
        assert abs(result_mean.item() - 0.22_39_67) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.53_02) < 1e-2
        assert abs(result_mean.item() - 0.06_84) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # the original dump obscured the boolean here; True/False in these two
        # tests follows the conventional pairing with the test names
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_49.82_95) < 1e-2
        assert abs(result_mean.item() - 0.19_51) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 1_49.07_84) < 1e-2
        assert abs(result_mean.item() - 0.19_41) < 1e-3
642
0
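For context, a minimal sketch of the denoising loop the scheduler tests above exercise; the zero tensor stands in for a real UNet noise prediction, and the shapes and step count are illustrative only.

import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)  # illustrative latent shape
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for a UNet prediction
    sample = scheduler.step(model_output, t, sample, eta=0.0).prev_sample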
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer


@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        # Reproduces the score reported by the original Mesh-TensorFlow implementation
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
720
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # k is the Harris free parameter; 0.04-0.06 is the usual range
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # use the k supplied at construction; response threshold can be tuned
                r = det - self.k * (trace**2)
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
642
0
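A worked note on the score check in the MT5 test above: the model returns the mean per-token cross-entropy, while the reference Mesh-TensorFlow score is the negative total log-likelihood, so the loss is scaled by the label length. The numbers below are made up for illustration.

mean_token_loss = 7.72   # assumed mean cross-entropy per label token
target_length = 11       # assumed number of label tokens
mtf_style_score = -(target_length * mean_token_loss)  # -84.92, the quantity compared to EXPECTED_SCORE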
from __future__ import annotations import unittest from transformers import DebertaVaConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDebertaVaForMaskedLM, TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, TFDebertaVaModel, ) class UpperCAmelCase__: '''simple docstring''' def __init__( self : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str=13 , lowerCAmelCase : List[str]=7 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Any=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=99 , lowerCAmelCase : str=32 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Tuple=4 , lowerCAmelCase : Dict=37 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Optional[Any]=5_12 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Any="None" , lowerCAmelCase : List[Any]=3 , lowerCAmelCase : str=4 , lowerCAmelCase : List[str]=None , ) -> Tuple: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_labels lowercase__ = num_choices lowercase__ = relative_attention lowercase__ = position_biased_input lowercase__ = pos_att_type lowercase__ = scope def UpperCAmelCase ( self : int) -> int: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase__ = DebertaVaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=_lowercase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, 
token_labels, choice_labels def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any) -> str: """simple docstring""" lowercase__ = TFDebertaVaModel(config=_lowercase) lowercase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} lowercase__ = [input_ids, input_mask] lowercase__ = model(_lowercase) lowercase__ = model(_lowercase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase ( self : str , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : int) -> Dict: """simple docstring""" lowercase__ = TFDebertaVaForMaskedLM(config=_lowercase) lowercase__ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowercase__ = model(_lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : str) -> str: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFDebertaVaForSequenceClassification(config=_lowercase) lowercase__ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowercase__ = model(_lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self : str , lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any]) -> str: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFDebertaVaForTokenClassification(config=_lowercase) lowercase__ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowercase__ = model(_lowercase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self : str , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]) -> Any: """simple docstring""" lowercase__ = TFDebertaVaForQuestionAnswering(config=_lowercase) lowercase__ = { """input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids, } lowercase__ = model(_lowercase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( lowercase__ ) = config_and_inputs lowercase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): '''simple docstring''' A : str = ( ( TFDebertaVaModel, TFDebertaVaForMaskedLM, 
TFDebertaVaForQuestionAnswering, TFDebertaVaForSequenceClassification, TFDebertaVaForTokenClassification, ) if is_tf_available() else () ) A : Optional[Any] = ( { "feature-extraction": TFDebertaVaModel, "fill-mask": TFDebertaVaForMaskedLM, "question-answering": TFDebertaVaForQuestionAnswering, "text-classification": TFDebertaVaForSequenceClassification, "token-classification": TFDebertaVaForTokenClassification, "zero-shot": TFDebertaVaForSequenceClassification, } if is_tf_available() else {} ) A : Dict = False A : Any = False def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" lowercase__ = TFDebertaVaModelTester(self) lowercase__ = ConfigTester(self , config_class=_lowercase , hidden_size=37) def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowercase) def UpperCAmelCase ( self : Any) -> Optional[int]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowercase) def UpperCAmelCase ( self : Dict) -> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowercase) def UpperCAmelCase ( self : Optional[int]) -> List[str]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowercase) def UpperCAmelCase ( self : Tuple) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowercase) @slow def UpperCAmelCase ( self : Tuple) -> List[str]: """simple docstring""" lowercase__ = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge') self.assertIsNotNone(_lowercase) @require_tf class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' @unittest.skip(reason='Model not available yet') def UpperCAmelCase ( self : List[str]) -> List[Any]: """simple docstring""" pass @slow def UpperCAmelCase ( self : Tuple) -> List[str]: """simple docstring""" lowercase__ = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge') lowercase__ = tf.constant([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]]) lowercase__ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) lowercase__ = model(_lowercase , attention_mask=_lowercase)[0] lowercase__ = tf.constant( [[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]]) tf.debugging.assert_near(output[:, 1:4, 1:4] , _lowercase , atol=1E-4)
721
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
642
0
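A hedged sketch of instantiating the speech-to-text configuration above; the class names follow the public transformers API, and the small sizes are illustrative only.

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration

config = Speech2TextConfig(encoder_layers=2, decoder_layers=2, d_model=64, encoder_ffn_dim=128, decoder_ffn_dim=128)
model = Speech2TextForConditionalGeneration(config)
print(sum(p.numel() for p in model.parameters()))  # tiny randomly initialized model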
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : int = { "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase__( _lowercase ): '''simple docstring''' A : Union[str, Any] = '''sew-d''' def __init__( self : List[str] , lowerCAmelCase : Dict=32 , lowerCAmelCase : Optional[int]=7_68 , lowerCAmelCase : Optional[Any]=12 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : Tuple=30_72 , lowerCAmelCase : Any=2 , lowerCAmelCase : List[Any]=5_12 , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]=("p2c", "c2p") , lowerCAmelCase : Dict="layer_norm" , lowerCAmelCase : List[str]="gelu_python" , lowerCAmelCase : int=0.1 , lowerCAmelCase : Tuple=0.1 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Optional[Any]=0.02 , lowerCAmelCase : Optional[int]=1E-7 , lowerCAmelCase : Optional[Any]=1E-5 , lowerCAmelCase : Optional[int]="group" , lowerCAmelCase : Union[str, Any]="gelu" , lowerCAmelCase : Tuple=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowerCAmelCase : Dict=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase : int=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : str=1_28 , lowerCAmelCase : Tuple=16 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Tuple=0.05 , lowerCAmelCase : List[Any]=10 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : str=0.0 , lowerCAmelCase : Union[str, Any]=10 , lowerCAmelCase : Tuple=0 , lowerCAmelCase : Tuple="mean" , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Dict=False , lowerCAmelCase : str=2_56 , lowerCAmelCase : int=0 , lowerCAmelCase : Dict=1 , lowerCAmelCase : List[str]=2 , **lowerCAmelCase : Tuple , ) -> Any: """simple docstring""" super().__init__(**A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_) lowercase__ = hidden_size lowercase__ = feat_extract_norm lowercase__ = feat_extract_activation lowercase__ = list(A_) lowercase__ = list(A_) lowercase__ = list(A_) lowercase__ = conv_bias lowercase__ = num_conv_pos_embeddings lowercase__ = num_conv_pos_embedding_groups lowercase__ = len(self.conv_dim) lowercase__ = num_hidden_layers lowercase__ = intermediate_size lowercase__ = squeeze_factor lowercase__ = max_position_embeddings lowercase__ = position_buckets lowercase__ = share_att_key lowercase__ = relative_attention lowercase__ = norm_rel_ebd lowercase__ = list(A_) lowercase__ = hidden_act lowercase__ = num_attention_heads lowercase__ = hidden_dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = feat_proj_dropout lowercase__ = final_dropout lowercase__ = layer_norm_eps lowercase__ = feature_layer_norm_eps lowercase__ = initializer_range lowercase__ = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f'''but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.''') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowercase__ = apply_spec_augment lowercase__ = mask_time_prob lowercase__ = mask_time_length lowercase__ = mask_time_min_masks lowercase__ = mask_feature_prob lowercase__ = mask_feature_length lowercase__ = mask_feature_min_masks # ctc loss lowercase__ = ctc_loss_reduction lowercase__ = ctc_zero_infinity # sequence classification lowercase__ = use_weighted_layer_sum lowercase__ = classifier_proj_size @property def UpperCAmelCase ( self : int) -> Optional[int]: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
700
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[int] = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
642
0
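A minimal sketch of the lazy-import pattern used in the Reformer __init__ above, assuming transformers' _LazyModule helper; the single-entry import structure is illustrative. Submodules are only imported when one of their attributes is first accessed.

import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_reformer": ["ReformerConfig"]}
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)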
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """Recursive 0/1 knapsack: best value achievable using items from `index` onwards."""
    if index == number_of_items:
        return 0
    # ans1: skip the current item; ans2: take it if it fits
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
701
# Imports import numpy as np class UpperCAmelCase__: '''simple docstring''' def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict: """simple docstring""" self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int: """simple docstring""" if red is not None: lowercase__ = red if green is not None: lowercase__ = green if blue is not None: lowercase__ = blue if red_edge is not None: lowercase__ = red_edge if nir is not None: lowercase__ = nir return True def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]: """simple docstring""" self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase) lowercase__ = { 'ARVI2': self.arvaa, 'CCCI': self.ccci, 'CVI': self.cvi, 'GLI': self.gli, 'NDVI': self.ndvi, 'BNDVI': self.bndvi, 'redEdgeNDVI': self.red_edge_ndvi, 'GNDVI': self.gndvi, 'GBNDVI': self.gbndvi, 'GRNDVI': self.grndvi, 'RBNDVI': self.rbndvi, 'PNDVI': self.pndvi, 'ATSAVI': self.atsavi, 'BWDRVI': self.bwdrvi, 'CIgreen': self.ci_green, 'CIrededge': self.ci_rededge, 'CI': self.ci, 'CTVI': self.ctvi, 'GDVI': self.gdvi, 'EVI': self.evi, 'GEMI': self.gemi, 'GOSAVI': self.gosavi, 'GSAVI': self.gsavi, 'Hue': self.hue, 'IVI': self.ivi, 'IPVI': self.ipvi, 'I': self.i, 'RVI': self.rvi, 'MRVI': self.mrvi, 'MSAVI': self.m_savi, 'NormG': self.norm_g, 'NormNIR': self.norm_nir, 'NormR': self.norm_r, 'NGRDI': self.ngrdi, 'RI': self.ri, 'S': self.s, 'IF': self._if, 'DVI': self.dvi, 'TVI': self.tvi, 'NDRE': self.ndre, } try: return funcs[index]() except KeyError: print('Index not in the list!') return False def UpperCAmelCase ( self : Optional[int]) -> List[str]: """simple docstring""" return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def UpperCAmelCase ( self : int) -> Any: """simple docstring""" return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def UpperCAmelCase ( self : str) -> Optional[int]: """simple docstring""" return self.nir * (self.red / (self.green**2)) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return (self.nir - self.red) / (self.nir + self.red) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" return (self.nir - self.blue) / (self.nir + self.blue) def UpperCAmelCase ( self : List[Any]) -> Optional[int]: """simple docstring""" return (self.redEdge - self.red) / (self.redEdge + self.red) def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green) def UpperCAmelCase ( self : Dict) -> int: """simple docstring""" return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def UpperCAmelCase ( self : Union[str, 
Any]) -> Optional[int]: """simple docstring""" return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def UpperCAmelCase ( self : Optional[Any]) -> Dict: """simple docstring""" return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]: """simple docstring""" return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def UpperCAmelCase ( self : int) -> Tuple: """simple docstring""" return (self.nir / self.green) - 1 def UpperCAmelCase ( self : Any) -> str: """simple docstring""" return (self.nir / self.redEdge) - 1 def UpperCAmelCase ( self : Any) -> List[str]: """simple docstring""" return (self.red - self.blue) / self.red def UpperCAmelCase ( self : Any) -> Optional[int]: """simple docstring""" lowercase__ = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2)) def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" return self.nir - self.green def UpperCAmelCase ( self : Tuple) -> List[Any]: """simple docstring""" return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red) def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green + y) def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]: """simple docstring""" return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def UpperCAmelCase ( self : str) -> int: """simple docstring""" return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple: """simple docstring""" return (self.nir - b) / (a * self.red) def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return (self.red + self.green + self.blue) / 30.5 def UpperCAmelCase ( self : int) -> str: """simple docstring""" return self.nir / self.red def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return (self.rvi() - 1) / (self.rvi() + 1) def UpperCAmelCase ( self : Optional[int]) -> Optional[int]: """simple docstring""" return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return self.green / (self.nir + self.red + self.green) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" return self.nir / (self.nir + self.red + self.green) def UpperCAmelCase ( self : List[Any]) -> Dict: """simple docstring""" 
return self.red / (self.nir + self.red + self.green) def UpperCAmelCase ( self : Optional[Any]) -> Any: """simple docstring""" return (self.green - self.red) / (self.green + self.red) def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" return (self.red - self.green) / (self.red + self.green) def UpperCAmelCase ( self : str) -> int: """simple docstring""" lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)]) lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) return (max_value - min_value) / max_value def UpperCAmelCase ( self : Optional[int]) -> Tuple: """simple docstring""" return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def UpperCAmelCase ( self : int) -> Optional[Any]: """simple docstring""" return self.nir / self.red def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" return (self.ndvi() + 0.5) ** (1 / 2) def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" return (self.nir - self.redEdge) / (self.nir + self.redEdge)
642
0
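A small worked example for the recursive knapsack above; the item lists are invented. With capacity 7, the optimum takes the items of weight 3 and 4 for a total value of 9.

weights = [1, 3, 4, 5]
values = [1, 4, 5, 7]
print(knapsack(weights, values, len(weights), 7, 0))  # 9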
'''simple docstring''' import argparse import json import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification def _lowerCAmelCase ( A__ ): lowercase__ = SwinConfig() lowercase__ = swin_name.split('_' ) lowercase__ = name_split[1] lowercase__ = int(name_split[4] ) lowercase__ = int(name_split[3][-1] ) if model_size == "tiny": lowercase__ = 96 lowercase__ = (2, 2, 6, 2) lowercase__ = (3, 6, 12, 24) elif model_size == "small": lowercase__ = 96 lowercase__ = (2, 2, 18, 2) lowercase__ = (3, 6, 12, 24) elif model_size == "base": lowercase__ = 128 lowercase__ = (2, 2, 18, 2) lowercase__ = (4, 8, 16, 32) else: lowercase__ = 192 lowercase__ = (2, 2, 18, 2) lowercase__ = (6, 12, 24, 48) if "in22k" in swin_name: lowercase__ = 21_841 else: lowercase__ = 1_000 lowercase__ = 'huggingface/label-files' lowercase__ = 'imagenet-1k-id2label.json' lowercase__ = json.load(open(hf_hub_download(A__ , A__ , repo_type='dataset' ) , 'r' ) ) lowercase__ = {int(A__ ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = img_size lowercase__ = num_classes lowercase__ = embed_dim lowercase__ = depths lowercase__ = num_heads lowercase__ = window_size return config def _lowerCAmelCase ( A__ ): if "patch_embed.proj" in name: lowercase__ = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' ) if "patch_embed.norm" in name: lowercase__ = name.replace('patch_embed.norm' , 'embeddings.norm' ) if "layers" in name: lowercase__ = 'encoder.' + name if "attn.proj" in name: lowercase__ = name.replace('attn.proj' , 'attention.output.dense' ) if "attn" in name: lowercase__ = name.replace('attn' , 'attention.self' ) if "norm1" in name: lowercase__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowercase__ = name.replace('norm2' , 'layernorm_after' ) if "mlp.fc1" in name: lowercase__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowercase__ = name.replace('mlp.fc2' , 'output.dense' ) if name == "norm.weight": lowercase__ = 'layernorm.weight' if name == "norm.bias": lowercase__ = 'layernorm.bias' if "head" in name: lowercase__ = name.replace('head' , 'classifier' ) else: lowercase__ = 'swin.' + name return name def _lowerCAmelCase ( A__ , A__ ): for key in orig_state_dict.copy().keys(): lowercase__ = orig_state_dict.pop(A__ ) if "mask" in key: continue elif "qkv" in key: lowercase__ = key.split('.' 
) lowercase__ = int(key_split[1] ) lowercase__ = int(key_split[3] ) lowercase__ = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: lowercase__ = val[:dim, :] lowercase__ = val[ dim : dim * 2, : ] lowercase__ = val[-dim:, :] else: lowercase__ = val[ :dim ] lowercase__ = val[ dim : dim * 2 ] lowercase__ = val[ -dim: ] else: lowercase__ = val return orig_state_dict def _lowerCAmelCase ( A__ , A__ ): lowercase__ = timm.create_model(A__ , pretrained=A__ ) timm_model.eval() lowercase__ = get_swin_config(A__ ) lowercase__ = SwinForImageClassification(A__ ) model.eval() lowercase__ = convert_state_dict(timm_model.state_dict() , A__ ) model.load_state_dict(A__ ) lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase__ = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) ) lowercase__ = Image.open(requests.get(A__ , stream=A__ ).raw ) lowercase__ = image_processor(images=A__ , return_tensors='pt' ) lowercase__ = timm_model(inputs['pixel_values'] ) lowercase__ = model(**A__ ).logits assert torch.allclose(A__ , A__ , atol=1E-3 ) print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": a__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--swin_name", default="swin_tiny_patch4_window7_224", type=str, help="Name of the Swin timm model you\'d like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) a__ : Optional[int] = parser.parse_args() convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
702
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
642
0
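A hedged sketch of running a converted Swin checkpoint end to end, assuming the standard transformers image-classification API and the public microsoft/swin-tiny-patch4-window7-224 checkpoint.

import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, SwinForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
model = SwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224")

image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])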
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
703
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : List[Any] = None A : Optional[int] = None @property def UpperCAmelCase ( self : str) -> Union[str, Any]: """simple docstring""" return self.feat_extract_tester.prepare_feat_extract_dict() def UpperCAmelCase ( self : int) -> Any: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(lowerCAmelCase , 'feature_size')) self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate')) self.assertTrue(hasattr(lowerCAmelCase , 'padding_value')) def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name]))) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) @require_torch def UpperCAmelCase ( self : Dict) -> int: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) @require_tf def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]: """simple docstring""" def _inputs_have_equal_length(lowerCAmelCase : int): lowercase__ = len(input[0]) for input_slice in input[1:]: if len(lowerCAmelCase) != length: return False return True def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple): if len(lowerCAmelCase) != len(lowerCAmelCase): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase): if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3): 
return False return True lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = self.feat_extract_tester.seq_length_diff lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff lowercase__ = self.feat_extract_tester.min_seq_length lowercase__ = self.feat_extract_tester.batch_size lowercase__ = self.feat_extract_tester.feature_size # test padding for List[int] + numpy lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest') lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1])) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np') lowercase__ = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np') lowercase__ = input_a[input_name] self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) self.assertTrue(len(input_a[0]) == pad_min_length) self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0]))) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length)) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size) # test padding for `pad_to_multiple_of` for List[int] + numpy lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , ) lowercase__ = input_a[input_name] self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a)) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length)) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size) # Check padding value is correct lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3) self.assertTrue( abs( np.asarray(input_a[1])[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)) < 1E-3) self.assertTrue( abs( 
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)) < 1E-3) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length)) < 1E-3) def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str: """simple docstring""" def _inputs_have_equal_length(lowerCAmelCase : int): lowercase__ = len(input[0]) for input_slice in input[1:]: if len(lowerCAmelCase) != length: return False return True def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]): if len(lowerCAmelCase) != len(lowerCAmelCase): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase): if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3): return False return True lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) # truncate to smallest lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0])) lowercase__ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) # truncate to smallest with np lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np') lowercase__ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(input_a.shape[1] == len(speech_inputs[0])) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) # truncate to middle lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np') lowercase__ = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1])) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1])) # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): 
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy lowercase__ = 12 lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , ) lowercase__ = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of lowercase__ = len(speech_inputs[0]) if expected_length % pad_to_multiple_of != 0: lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0]) == expected_length) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" self._check_padding(numpify=lowerCAmelCase) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" self._check_padding(numpify=lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> int: """simple docstring""" self._check_truncation(numpify=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" self._check_truncation(numpify=lowerCAmelCase) @require_torch def UpperCAmelCase ( self : Dict) -> List[str]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name] self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2) @require_tf def UpperCAmelCase ( self : str) -> str: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name] self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2) def UpperCAmelCase ( self : Optional[Any]) -> Tuple: """simple docstring""" lowercase__ = self.feat_extract_dict lowercase__ = True lowercase__ = self.feature_extraction_class(**lowerCAmelCase) lowercase__ = 
self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = [len(lowerCAmelCase) for x in speech_inputs] lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np') self.assertIn('attention_mask' , lowerCAmelCase) self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2])) self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase) def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" lowercase__ = self.feat_extract_dict lowercase__ = True lowercase__ = self.feature_extraction_class(**lowerCAmelCase) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = [len(lowerCAmelCase) for x in speech_inputs] lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = min(lowerCAmelCase) lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np') self.assertIn('attention_mask' , lowerCAmelCase) self.assertListEqual( list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length]) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
642
0
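A minimal sketch of the padding behaviour the feature-extraction tests above exercise; Wav2Vec2FeatureExtractor is one concrete SequenceFeatureExtractor (an assumption for illustration), and the input lengths are arbitrary.

import numpy as np
from transformers import BatchFeature, Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
speech = [np.random.randn(800).astype(np.float32), np.random.randn(1200).astype(np.float32)]
batch = BatchFeature({"input_values": speech})
padded = extractor.pad(batch, padding="longest", return_tensors="np")
print(padded["input_values"].shape)  # (2, 1200): the shorter input is padded with 0.0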
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
704
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def _lowerCAmelCase ( A__ ):
    # Möbius function: 0 if n is divisible by a square, otherwise
    # (-1) ** (number of prime factors).
    lowercase__ = prime_factors(A__ )
    if is_square_free(lowercase__ ):
        return -1 if len(lowercase__ ) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
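Worked values for the Möbius function above (the `_lowerCAmelCase` name follows this file's convention; read it as mobius(n)):

assert _lowerCAmelCase(7) == -1   # 7 is prime: one factor, square-free
assert _lowerCAmelCase(15) == 1   # 15 = 3 * 5: two factors, square-free
assert _lowerCAmelCase(24) == 0   # 24 = 2**3 * 3: not square-free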
642
0
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging a__ : List[str] = logging.get_logger(__name__) def _lowerCAmelCase ( A__ ): lowercase__ = r"""\w+[.]\d+""" lowercase__ = re.findall(A__ , A__ ) for pat in pats: lowercase__ = key.replace(A__ , '_'.join(pat.split('.' ) ) ) return key def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = pt_tuple_key[:-1] + ("""scale""",) if ( any('norm' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowercase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowercase__ = pt_tuple_key[:-1] + ("""scale""",) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowercase__ = pt_tuple_key[:-1] + ("""embedding""",) return renamed_pt_tuple_key, pt_tensor # conv layer lowercase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowercase__ = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowercase__ = pt_tuple_key[:-1] + ("""kernel""",) if pt_tuple_key[-1] == "weight": lowercase__ = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowercase__ = pt_tuple_key[:-1] + ("""weight""",) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowercase__ = pt_tuple_key[:-1] + ("""bias""",) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def _lowerCAmelCase ( A__ , A__ , A__=42 ): # Step 1: Convert pytorch tensor to numpy lowercase__ = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowercase__ = flax_model.init_weights(PRNGKey(A__ ) ) lowercase__ = flatten_dict(A__ ) lowercase__ = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowercase__ = rename_key(A__ ) lowercase__ = tuple(renamed_pt_key.split('.' ) ) # Correctly rename weight parameters lowercase__ = rename_key_and_reshape_tensor(A__ , A__ , A__ ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown lowercase__ = jnp.asarray(A__ ) return unflatten_dict(A__ )
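A standalone restatement of the key-renaming step above: dotted numeric segments in PyTorch parameter names are rewritten so they line up with Flax module names.

import re

key = "down_blocks.0.attentions.1.proj.weight"
for pat in re.findall(r"\w+[.]\d+", key):
    key = key.replace(pat, "_".join(pat.split(".")))
print(key)  # down_blocks_0.attentions_1.proj.weight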
705
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a__ : List[str] = logging.get_logger(__name__) a__ : List[Any] = { "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json", } class UpperCAmelCase__( lowerCamelCase , lowerCamelCase ): '''simple docstring''' A : List[str] = "focalnet" def __init__( self : Dict , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : int=3 , lowerCAmelCase : Union[str, Any]=96 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : int=[1_92, 3_84, 7_68, 7_68] , lowerCAmelCase : str=[2, 2, 6, 2] , lowerCAmelCase : Tuple=[2, 2, 2, 2] , lowerCAmelCase : Optional[Any]=[3, 3, 3, 3] , lowerCAmelCase : int="gelu" , lowerCAmelCase : Any=4.0 , lowerCAmelCase : List[str]=0.0 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=False , lowerCAmelCase : Tuple=1E-4 , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : List[str]=False , lowerCAmelCase : str=0.02 , lowerCAmelCase : Optional[int]=1E-5 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : str , ) -> List[str]: """simple docstring""" super().__init__(**lowerCAmelCase) lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = embed_dim lowercase__ = use_conv_embed lowercase__ = hidden_sizes lowercase__ = depths lowercase__ = focal_levels lowercase__ = focal_windows lowercase__ = hidden_act lowercase__ = mlp_ratio lowercase__ = hidden_dropout_prob lowercase__ = drop_path_rate lowercase__ = use_layerscale lowercase__ = layerscale_value lowercase__ = use_post_layernorm lowercase__ = use_post_layernorm_in_modulation lowercase__ = normalize_modulator lowercase__ = initializer_range lowercase__ = layer_norm_eps lowercase__ = encoder_stride lowercase__ = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths) + 1)] lowercase__, lowercase__ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase , out_indices=lowerCAmelCase , stage_names=self.stage_names)
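A hedged usage sketch for the config above, assuming a recent transformers release (FocalNetModel is the matching model class; the values mirror the defaults):

from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig(image_size=224, embed_dim=96, depths=[2, 2, 6, 2])
model = FocalNetModel(config)  # randomly initialized
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']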
642
0
import json import os import unittest from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import ( VOCAB_FILES_NAMES, GPTSanJapaneseTokenizer, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__( __a , unittest.TestCase ): '''simple docstring''' A : Optional[int] = GPTSanJapaneseTokenizer A : Dict = False A : Union[str, Any] = {"do_clean_text": False, "add_prefix_space": False} def UpperCAmelCase ( self : Dict) -> Optional[int]: """simple docstring""" super().setUp() # fmt: off lowercase__ = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""] # fmt: on lowercase__ = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀 lowercase__ = {"""unk_token""": """<unk>"""} lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens])) with open(self.emoji_file , 'w') as emoji_writer: emoji_writer.write(json.dumps(a_)) def UpperCAmelCase ( self : Dict , **lowerCAmelCase : List[Any]) -> Any: """simple docstring""" kwargs.update(self.special_tokens_map) return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **a_) def UpperCAmelCase ( self : int , lowerCAmelCase : Any) -> Tuple: """simple docstring""" lowercase__ = """こんにちは、世界。 \nこんばんは、㔺界。😀""" lowercase__ = """こんにちは、世界。 \nこんばんは、世界。😀""" return input_text, output_text def UpperCAmelCase ( self : Any , lowerCAmelCase : Optional[Any]) -> Optional[int]: """simple docstring""" lowercase__ = self.get_input_output_texts(a_) lowercase__ = tokenizer.encode(a_ , add_special_tokens=a_) lowercase__ = tokenizer.decode(a_ , clean_up_tokenization_spaces=a_) return text, ids def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" pass # TODO add if relevant def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" pass # TODO add if relevant def UpperCAmelCase ( self : str) -> Optional[int]: """simple docstring""" pass # TODO add if relevant def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" lowercase__ = self.get_tokenizer() # Testing tokenization lowercase__ = """こんにちは、世界。 こんばんは、㔺界。""" lowercase__ = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""] lowercase__ = tokenizer.tokenize(a_) self.assertListEqual(a_ , a_) # Testing conversion to ids without special tokens lowercase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6] lowercase__ = tokenizer.convert_tokens_to_ids(a_) self.assertListEqual(a_ , a_) # Testing conversion to ids with special tokens lowercase__ = tokens + [tokenizer.unk_token] lowercase__ = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19] lowercase__ = tokenizer.convert_tokens_to_ids(a_) self.assertListEqual(a_ , a_) def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = self.get_tokenizer() # Testing tokenization lowercase__ = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。""" lowercase__ = """こんにちは、、、、世界。こんばんは、、、、世界。""" lowercase__ = tokenizer.encode(a_) 
lowercase__ = tokenizer.decode(a_) self.assertEqual(a_ , a_) @slow def UpperCAmelCase ( self : Union[str, Any]) -> Tuple: """simple docstring""" lowercase__ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') # Testing tokenization lowercase__ = """こんにちは、世界。""" lowercase__ = """こんばんは、㔺界。😀""" lowercase__ = """こんにちは、世界。こんばんは、世界。😀""" lowercase__ = tokenizer.encode(prefix_text + input_text) lowercase__ = tokenizer.encode('' , prefix_text=prefix_text + input_text) lowercase__ = tokenizer.encode(a_ , prefix_text=a_) lowercase__ = tokenizer.decode(a_) lowercase__ = tokenizer.decode(a_) lowercase__ = tokenizer.decode(a_) self.assertEqual(a_ , a_) self.assertEqual(a_ , a_) self.assertEqual(a_ , a_) @slow def UpperCAmelCase ( self : Union[str, Any]) -> int: """simple docstring""" lowercase__ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') # Testing tokenization lowercase__ = """こんにちは、世界。""" lowercase__ = """こんばんは、㔺界。😀""" lowercase__ = len(tokenizer.encode(a_)) - 2 lowercase__ = len(tokenizer.encode(a_)) - 2 lowercase__ = [1] + [0] * (len_prefix + len_text + 1) lowercase__ = [1] * (len_prefix + len_text + 1) + [0] lowercase__ = [1] + [1] * (len_prefix) + [0] * (len_text + 1) lowercase__ = tokenizer(prefix_text + input_text).token_type_ids lowercase__ = tokenizer('' , prefix_text=prefix_text + input_text).token_type_ids lowercase__ = tokenizer(a_ , prefix_text=a_).token_type_ids self.assertListEqual(a_ , a_) self.assertListEqual(a_ , a_) self.assertListEqual(a_ , a_) @slow def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" lowercase__ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') lowercase__ = tokenizer.encode('あンいワ') lowercase__ = tokenizer.encode('' , prefix_text='あンいワ') lowercase__ = tokenizer.encode('いワ' , prefix_text='あン') self.assertEqual(tokenizer.decode(a_) , tokenizer.decode(a_)) self.assertEqual(tokenizer.decode(a_) , tokenizer.decode(a_)) self.assertNotEqual(a_ , a_) self.assertNotEqual(a_ , a_) self.assertEqual(x_token_a[1] , x_token_a[-1]) # SEG token self.assertEqual(x_token_a[1] , x_token_a[3]) # SEG token @slow def UpperCAmelCase ( self : Optional[int]) -> Tuple: """simple docstring""" lowercase__ = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese') lowercase__ = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]] lowercase__ = tokenizer(a_ , padding=a_) lowercase__ = tokenizer.batch_encode_plus(a_ , padding=a_) # fmt: off lowercase__ = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]] lowercase__ = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]] lowercase__ = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]] # fmt: on self.assertListEqual(x_token.input_ids , a_) self.assertListEqual(x_token.token_type_ids , a_) self.assertListEqual(x_token.attention_mask , a_) self.assertListEqual(x_token_a.input_ids , a_) self.assertListEqual(x_token_a.token_type_ids , a_) self.assertListEqual(x_token_a.attention_mask , a_) def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" pass def UpperCAmelCase ( self : Any) -> Dict: """simple docstring""" pass
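A hedged sketch of the prefix-text behavior the slow tests above exercise (checkpoint name taken from those tests; requires downloading the tokenizer):

from transformers import GPTSanJapaneseTokenizer

tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
# Encoding the same characters as input vs. prefix context yields different
# ids (a SEG token marks the boundary) but the same decoded surface form.
ids = tokenizer("いワ", prefix_text="あン").input_ids
print(tokenizer.decode(ids))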
706
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a__ : Optional[int] = logging.get_logger(__name__) a__ : Dict = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } a__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } a__ : Any = {"facebook/blenderbot_small-90M": 5_12} def _lowerCAmelCase ( A__ ): lowercase__ = set() lowercase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ = char lowercase__ = set(A__ ) return pairs class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : List[str] = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Tuple = ["input_ids", "attention_mask"] def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]: """simple docstring""" super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase) with open(lowerCAmelCase , encoding='utf-8') as vocab_handle: lowercase__ = json.load(lowerCAmelCase) lowercase__ = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase , encoding='utf-8') as merges_handle: lowercase__ = merges_handle.read().split('\n')[1:-1] lowercase__ = [tuple(merge.split()) for merge in merges] lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase)))) lowercase__ = {} @property def UpperCAmelCase ( self : int) -> int: """simple docstring""" return len(self.encoder) def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder) def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str: """simple docstring""" if token in self.cache: return self.cache[token] lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase) lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase) lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase) if "\n" in token: lowercase__ = token.replace('\n' , ' __newln__') lowercase__ = token.split(' ') lowercase__ = [] for token in tokens: if not len(lowerCAmelCase): continue lowercase__ = token.lower() lowercase__ = tuple(lowerCAmelCase) lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>']) lowercase__ = get_pairs(lowerCAmelCase) if not pairs: words.append(lowerCAmelCase) continue while True: lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf'))) if bigram not in self.bpe_ranks: break lowercase__, lowercase__ = bigram lowercase__ = [] lowercase__ = 0 while i < len(lowerCAmelCase): try: lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase) new_word.extend(word[i:j]) lowercase__ = j except ValueError: new_word.extend(word[i:]) break if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: 
new_word.append(word[i]) i += 1 lowercase__ = tuple(lowerCAmelCase) lowercase__ = new_word if len(lowerCAmelCase) == 1: break else: lowercase__ = get_pairs(lowerCAmelCase) lowercase__ = '@@ '.join(lowerCAmelCase) lowercase__ = word[:-4] lowercase__ = word words.append(lowerCAmelCase) return " ".join(lowerCAmelCase) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' '))) return split_tokens def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int: """simple docstring""" lowercase__ = token.lower() return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token)) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str: """simple docstring""" return self.decoder.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str: """simple docstring""" lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip() return out_string def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCAmelCase): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return lowercase__ = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) lowercase__ = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(lowerCAmelCase , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n') lowercase__ = 0 with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!') lowercase__ = token_index writer.write(' '.join(lowerCAmelCase) + '\n') index += 1 return vocab_file, merge_file
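A standalone restatement of the pair-extraction helper above (defined as `_lowerCAmelCase`, referenced as `get_pairs` inside the class): BPE merge candidates are the adjacent symbol pairs of a word.

word = ("l", "o", "w", "e", "r")
pairs = set(zip(word, word[1:]))
print(sorted(pairs))  # [('e', 'r'), ('l', 'o'), ('o', 'w'), ('w', 'e')]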
642
0
import torch from diffusers import DDPMScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : int = (DDPMScheduler,) def UpperCAmelCase ( self : Optional[int] , **lowerCAmelCase : str) -> Dict: """simple docstring""" lowercase__ = { 'num_train_timesteps': 10_00, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'variance_type': 'fixed_small', 'clip_sample': True, } config.update(**__lowerCAmelCase) return config def UpperCAmelCase ( self : int) -> Any: """simple docstring""" for timesteps in [1, 5, 1_00, 10_00]: self.check_over_configs(num_train_timesteps=__lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Dict: """simple docstring""" for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]): self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase) def UpperCAmelCase ( self : Any) -> Any: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=__lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=__lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> Union[str, Any]: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=__lowerCAmelCase) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" self.check_over_configs(thresholding=__lowerCAmelCase) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs( thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , ) def UpperCAmelCase ( self : Tuple) -> List[Any]: """simple docstring""" for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=__lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" for t in [0, 5_00, 9_99]: self.check_over_forward(time_step=__lowerCAmelCase) def UpperCAmelCase ( self : Dict) -> str: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**__lowerCAmelCase) assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87) - 0.0_09_79)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99) - 0.02)) < 1E-5 def UpperCAmelCase ( self : Optional[int]) -> List[str]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**__lowerCAmelCase) lowercase__ = len(__lowerCAmelCase) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter lowercase__ = torch.manual_seed(0) for t in reversed(range(__lowerCAmelCase)): # 1. predict noise residual lowercase__ = model(__lowerCAmelCase , __lowerCAmelCase) # 2. 
predict previous mean of sample x_t-1 lowercase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase__ = pred_prev_sample lowercase__ = torch.sum(torch.abs(__lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(__lowerCAmelCase)) assert abs(result_sum.item() - 2_58.96_06) < 1E-2 assert abs(result_mean.item() - 0.33_72) < 1E-3 def UpperCAmelCase ( self : int) -> Tuple: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(prediction_type='v_prediction') lowercase__ = scheduler_class(**__lowerCAmelCase) lowercase__ = len(__lowerCAmelCase) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter lowercase__ = torch.manual_seed(0) for t in reversed(range(__lowerCAmelCase)): # 1. predict noise residual lowercase__ = model(__lowerCAmelCase , __lowerCAmelCase) # 2. predict previous mean of sample x_t-1 lowercase__ = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase).prev_sample # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance lowercase__ = pred_prev_sample lowercase__ = torch.sum(torch.abs(__lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(__lowerCAmelCase)) assert abs(result_sum.item() - 2_02.02_96) < 1E-2 assert abs(result_mean.item() - 0.26_31) < 1E-3 def UpperCAmelCase ( self : int) -> List[str]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**__lowerCAmelCase) lowercase__ = [1_00, 87, 50, 1, 0] scheduler.set_timesteps(timesteps=__lowerCAmelCase) lowercase__ = scheduler.timesteps for i, timestep in enumerate(__lowerCAmelCase): if i == len(__lowerCAmelCase) - 1: lowercase__ = -1 else: lowercase__ = timesteps[i + 1] lowercase__ = scheduler.previous_timestep(__lowerCAmelCase) lowercase__ = prev_t.item() self.assertEqual(__lowerCAmelCase , __lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**__lowerCAmelCase) lowercase__ = [1_00, 87, 50, 51, 0] with self.assertRaises(__lowerCAmelCase , msg='`custom_timesteps` must be in descending order.'): scheduler.set_timesteps(timesteps=__lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**__lowerCAmelCase) lowercase__ = [1_00, 87, 50, 1, 0] lowercase__ = len(__lowerCAmelCase) with self.assertRaises(__lowerCAmelCase , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'): scheduler.set_timesteps(num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase) def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**__lowerCAmelCase) lowercase__ = [scheduler.config.num_train_timesteps] with self.assertRaises( __lowerCAmelCase , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ): scheduler.set_timesteps(timesteps=__lowerCAmelCase)
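A hedged sketch of the denoising loop these tests drive, assuming a recent diffusers release; random tensors stand in for a real noise-prediction model:

import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps[:3]:  # a few steps only, for illustration
    noise_pred = torch.randn_like(sample)  # stands in for model(sample, t)
    sample = scheduler.step(noise_pred, int(t), sample, generator=generator).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])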
707
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


a__ : Optional[int] = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Each available backend registers its symbols in the import structure.
    a__["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a__["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a__, module_spec=__spec__)
642
0
import argparse import torch from transformers import GPTaLMHeadModel, RobertaForMaskedLM if __name__ == "__main__": a__ : int = argparse.ArgumentParser( description=( "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"]) parser.add_argument("--model_name", default="roberta-large", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") a__ : str = parser.parse_args() if args.model_type == "roberta": a__ : Optional[Any] = RobertaForMaskedLM.from_pretrained(args.model_name) a__ : int = "roberta" elif args.model_type == "gpt2": a__ : List[str] = GPTaLMHeadModel.from_pretrained(args.model_name) a__ : Optional[int] = "transformer" a__ : Tuple = model.state_dict() a__ : Any = {} # Embeddings # if args.model_type == "gpt2": for param_name in ["wte.weight", "wpe.weight"]: a__ : Optional[int] = state_dict[F'''{prefix}.{param_name}'''] else: for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]: a__ : List[Any] = F'''{prefix}.embeddings.{w}.weight''' a__ : List[str] = state_dict[param_name] for w in ["weight", "bias"]: a__ : Optional[Any] = F'''{prefix}.embeddings.LayerNorm.{w}''' a__ : Union[str, Any] = state_dict[param_name] # Transformer Blocks # a__ : List[str] = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: if args.model_type == "gpt2": for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]: for w in ["weight", "bias"]: a__ : str = state_dict[ F'''{prefix}.h.{teacher_idx}.{layer}.{w}''' ] a__ : List[str] = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias'''] else: for layer in [ "attention.self.query", "attention.self.key", "attention.self.value", "attention.output.dense", "attention.output.LayerNorm", "intermediate.dense", "output.dense", "output.LayerNorm", ]: for w in ["weight", "bias"]: a__ : Optional[int] = state_dict[ F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}''' ] std_idx += 1 # Language Modeling Head ###s if args.model_type == "roberta": for layer in ["lm_head.decoder.weight", "lm_head.bias"]: a__ : Any = state_dict[F'''{layer}'''] if args.vocab_transform: for w in ["weight", "bias"]: a__ : Dict = state_dict[F'''lm_head.dense.{w}'''] a__ : str = state_dict[F'''lm_head.layer_norm.{w}'''] elif args.model_type == "gpt2": for w in ["weight", "bias"]: a__ : str = state_dict[F'''{prefix}.ln_f.{w}'''] a__ : List[Any] = state_dict["lm_head.weight"] print(F'''N layers selected for distillation: {std_idx}''') print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''') print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''') torch.save(compressed_sd, args.dump_checkpoint)
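The layer-selection logic of the script above in miniature: six teacher layers are copied and renumbered consecutively for the student.

teacher_layers = [0, 2, 4, 7, 9, 11]
student_to_teacher = dict(enumerate(teacher_layers))
print(student_to_teacher)  # {0: 0, 1: 2, 2: 4, 3: 7, 4: 9, 5: 11}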
708
import heapq import sys import numpy as np a__ : Dict = tuple[int, int] class UpperCAmelCase__: '''simple docstring''' def __init__( self : List[str]) -> Any: """simple docstring""" lowercase__ = [] lowercase__ = set() def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]: """simple docstring""" if not self.empty(): return self.elements[0][0] else: return float('inf') def UpperCAmelCase ( self : int) -> str: """simple docstring""" return len(self.elements) == 0 def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]: """simple docstring""" if item not in self.set: heapq.heappush(self.elements , (priority, item)) self.set.add(lowerCAmelCase) else: # update # print("update", item) lowercase__ = [] ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple: """simple docstring""" if item in self.set: self.set.remove(lowerCAmelCase) lowercase__ = [] ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy)) def UpperCAmelCase ( self : Dict) -> List[Any]: """simple docstring""" return self.elements[0][1] def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) self.set.remove(lowerCAmelCase) return (priority, item) def _lowerCAmelCase ( A__ , A__ ): # euclidean distance lowercase__ = np.array(A__ ) lowercase__ = np.array(A__ ) return np.linalg.norm(a - b ) def _lowerCAmelCase ( A__ , A__ ): # integer division by time variable return consistent_heuristic(A__ , A__ ) // t def _lowerCAmelCase ( A__ , A__ ): # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def _lowerCAmelCase ( A__ , A__ , A__ , A__ ): lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ ) return ans def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = np.chararray((n, n) ) for i in range(A__ ): for j in range(A__ ): lowercase__ = '*' for i in range(A__ ): for j in range(A__ ): if (j, (n - 1) - i) in blocks: lowercase__ = '#' lowercase__ = '-' lowercase__ = back_pointer[goal] while x != start: ((lowercase__), (lowercase__)) = x # print(x) lowercase__ = '-' lowercase__ = back_pointer[x] lowercase__ = '-' for i in range(A__ ): for j in range(A__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=' ' ) print('<-- End position' , end=' ' ) else: print(grid[i][j] , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) print('PATH TAKEN BY THE ALGORITHM IS:-' ) lowercase__ = back_pointer[goal] while x != start: print(A__ , end=' ' ) lowercase__ = back_pointer[x] print(A__ ) sys.exit() def _lowerCAmelCase ( A__ ): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ): for itera in range(A__ ): open_list[itera].remove_element(A__ ) # print("s", s) # print("j", j) ((lowercase__), (lowercase__)) = s lowercase__ = (x - 1, y) lowercase__ = (x + 1, y) lowercase__ = (x, y + 1) lowercase__ = (x, y - 1) for neighbours in [left, right, up, down]: if 
neighbours not in blocks: if valid(A__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(A__ ) lowercase__ = -1 lowercase__ = float('inf' ) if valid(A__ ) and g_function[neighbours] > g_function[s] + 1: lowercase__ = g_function[s] + 1 lowercase__ = s if neighbours not in close_list_anchor: open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) ) if neighbours not in close_list_inad: for var in range(1 , A__ ): if key(A__ , A__ , A__ , A__ ) <= Wa * key( A__ , 0 , A__ , A__ ): open_list[j].put( A__ , key(A__ , A__ , A__ , A__ ) ) def _lowerCAmelCase ( ): lowercase__ = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} a__ : Any = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] a__ : Any = make_common_ground() a__ : Union[str, Any] = blocks_blk # hyper parameters a__ : List[Any] = 1 a__ : List[str] = 1 a__ : Optional[int] = 20 a__ : Optional[Any] = 3 # one consistent and two other inconsistent # start and end destination a__ : Tuple = (0, 0) a__ : str = (n - 1, n - 1) a__ : Optional[Any] = 1 def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = {start: 0, goal: float('inf' )} lowercase__ = {start: -1, goal: -1} lowercase__ = [] lowercase__ = set() for i in range(A__ ): open_list.append(PriorityQueue() ) open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) ) lowercase__ = [] lowercase__ = [] while open_list[0].minkey() < float('inf' ): for i in range(1 , A__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('inf' ): do_something(A__ , A__ , A__ ) else: lowercase__, lowercase__ = open_list[i].top_show() visited.add(A__ ) expand_state( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) close_list_inad.append(A__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('inf' ): do_something(A__ , A__ , A__ ) else: lowercase__ = open_list[0].top_show() visited.add(A__ ) expand_state( A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , ) close_list_anchor.append(A__ ) print('No path found to goal' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(A__ ): if (j, i) in blocks: print('#' , end=' ' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('*' , end=' ' ) else: print('-' , end=' ' ) else: print('*' , end=' ' ) if (j, i) == (n - 1, n - 1): print('<-- End position' , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
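Quick worked numbers for the heuristics above on the 20x20 grid (start (0, 0), goal (19, 19)):

import numpy as np

start, goal = (0, 0), (19, 19)
print(abs(start[0] - goal[0]) + abs(start[1] - goal[1]))   # Manhattan: 38
print(np.linalg.norm(np.array(start) - np.array(goal)))    # Euclidean: ~26.87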
642
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation a__ : Tuple = logging.get_logger(__name__) a__ : Optional[Any] = {'''tokenizer_file''': '''tokenizer.json'''} a__ : str = { '''tokenizer_file''': { '''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''', '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''', }, } class UpperCAmelCase__( a__ ): '''simple docstring''' A : Tuple = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : List[Any] = ["""input_ids""", """attention_mask"""] A : Dict = None def __init__( self : int , lowerCAmelCase : int=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : Tuple="<unk>" , lowerCAmelCase : str="<s>" , lowerCAmelCase : Optional[Any]="</s>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : Optional[Any]=False , lowerCAmelCase : Union[str, Any]=False , **lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" super().__init__( lowerCAmelCase__ , lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ , **lowerCAmelCase__ , ) lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space' , lowerCAmelCase__) != add_prefix_space: lowercase__ = getattr(lowerCAmelCase__ , pre_tok_state.pop('type')) lowercase__ = add_prefix_space lowercase__ = pre_tok_class(**lowerCAmelCase__) lowercase__ = add_prefix_space def UpperCAmelCase ( self : str , *lowerCAmelCase : Dict , **lowerCAmelCase : Dict) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase__) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ' pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__) def UpperCAmelCase ( self : int , *lowerCAmelCase : List[str] , **lowerCAmelCase : Dict) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase__) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' ' pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase__ , **lowerCAmelCase__) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple = None) -> Tuple[str]: """simple docstring""" lowercase__ = 
self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__) return tuple(lowerCAmelCase__) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List[Any]) -> List[int]: """simple docstring""" lowercase__ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__) + [self.eos_token_id]) if len(lowerCAmelCase__) > self.model_max_length: lowercase__ = input_ids[-self.model_max_length :] return input_ids
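A hedged usage sketch for the fast Bloom tokenizer above (the checkpoint is one of the entries in its pretrained map; requires a download):

from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
enc = tok("Hello world")
print(enc.input_ids)
print(tok.decode(enc.input_ids))  # round-trips to "Hello world"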
709
import math import sys def _lowerCAmelCase ( A__ ): lowercase__ = '' try: with open(A__ , 'rb' ) as binary_file: lowercase__ = binary_file.read() for dat in data: lowercase__ = F'''{dat:08b}''' result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def _lowerCAmelCase ( A__ ): lowercase__ = {'0': '0', '1': '1'} lowercase__, lowercase__ = '', '' lowercase__ = len(A__ ) for i in range(len(A__ ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue lowercase__ = lexicon[curr_string] result += last_match_id lowercase__ = last_match_id + '0' if math.loga(A__ ).is_integer(): lowercase__ = {} for curr_key in list(A__ ): lowercase__ = lexicon.pop(A__ ) lowercase__ = new_lex lowercase__ = last_match_id + '1' index += 1 lowercase__ = '' return result def _lowerCAmelCase ( A__ , A__ ): lowercase__ = 8 try: with open(A__ , 'wb' ) as opened_file: lowercase__ = [ to_write[i : i + byte_length] for i in range(0 , len(A__ ) , A__ ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def _lowerCAmelCase ( A__ ): lowercase__ = 0 for letter in data_bits: if letter == "1": break counter += 1 lowercase__ = data_bits[counter:] lowercase__ = data_bits[counter + 1 :] return data_bits def _lowerCAmelCase ( A__ , A__ ): lowercase__ = read_file_binary(A__ ) lowercase__ = remove_prefix(A__ ) lowercase__ = decompress_data(A__ ) write_file_binary(A__ , A__ ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
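A standalone restatement of the prefix-stripping helper above: everything up to and including the first '1' bit is discarded.

bits = "000" + "1" + "10110"  # length prefix, separator bit, payload
counter = bits.index("1")
print(bits[counter + 1:])  # 10110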
642
0
import os
import tempfile
import unittest

from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property


@unittest.skipUnless(os.path.exists(DEFAULT_REPO) , "Tatoeba directory does not exist." )
class UpperCAmelCase__( unittest.TestCase ):
    '''simple docstring'''
    @cached_property
    def resolver( self : List[str]) -> TatoebaConverter:
        """simple docstring"""
        lowercase__ = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=lowercase__)

    @slow
    def UpperCAmelCase ( self : Optional[int]) -> List[str]:
        """simple docstring"""
        self.resolver.convert_models(['heb-eng'])

    @slow
    def UpperCAmelCase ( self : Dict) -> Any:
        """simple docstring"""
        # write_model_card returns (content, metadata); the metadata dict is
        # what the assertion inspects.
        lowercase__, mmeta = self.resolver.write_model_card('opus-mt-he-en' , dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
710
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging a__ : int = logging.get_logger(__name__) a__ : Tuple = {"vocab_file": "vocab.txt"} a__ : int = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } a__ : Dict = { "facebook/esm2_t6_8M_UR50D": 10_24, "facebook/esm2_t12_35M_UR50D": 10_24, } def _lowerCAmelCase ( A__ ): with open(A__ , 'r' ) as f: lowercase__ = f.read().splitlines() return [l.strip() for l in lines] class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Union[str, Any] = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]: """simple docstring""" super().__init__(**lowerCAmelCase) lowercase__ = load_vocab_file(lowerCAmelCase) lowercase__ = dict(enumerate(self.all_tokens)) lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)} lowercase__ = unk_token lowercase__ = cls_token lowercase__ = pad_token lowercase__ = mask_token lowercase__ = eos_token lowercase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str: """simple docstring""" return self._id_to_token.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int: """simple docstring""" return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict: """simple docstring""" return text.split() def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]: """simple docstring""" return len(self._id_to_token) def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" return {token: i for i, token in enumerate(self.all_tokens)} def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int: """simple docstring""" return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token)) def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str: """simple docstring""" return self._id_to_token.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" lowercase__ = [self.cls_token_id] lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!') return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]: """simple docstring""" if 
already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.') return [1 if token in self.all_special_ids else 0 for token in token_ids_a] lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1] if token_ids_a is not None: mask += [0] * len(lowerCAmelCase) + [1] return mask def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict: """simple docstring""" lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt') with open(lowerCAmelCase , 'w') as f: f.write('\n'.join(self.all_tokens)) return (vocab_file,) @property def UpperCAmelCase ( self : Optional[int]) -> int: """simple docstring""" return self.get_vocab_size(with_added_tokens=lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int: """simple docstring""" return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
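Restating the special-token layout that `build_inputs_with_special_tokens` above produces; the ids are illustrative stand-ins (ESM has no SEP token, so EOS doubles as the separator):

cls_id, eos_id = 0, 2  # hypothetical ids
seq_a, seq_b = [5, 6, 7], [8, 9]
print([cls_id] + seq_a + [eos_id])                     # single sequence
print([cls_id] + seq_a + [eos_id] + seq_b + [eos_id])  # sequence pair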
642
0
'''simple docstring'''
import unittest

from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow


if is_flax_available():
    import optax
    from flax.training.common_utils import onehot

    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class UpperCAmelCase__( unittest.TestCase ):
    '''simple docstring'''
    @slow
    def UpperCAmelCase ( self : Tuple) -> Tuple:
        """simple docstring"""
        model = FlaxMT5ForConditionalGeneration.from_pretrained('google/mt5-small')
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')

        input_ids = tokenizer('Hello there' , return_tensors='np').input_ids
        labels = tokenizer('Hi I am' , return_tensors='np').input_ids

        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id)

        logits = model(input_ids , decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1])).mean()
        # Sequence log-likelihood: mean per-token cross-entropy times the
        # sequence length, negated, compared against the reference value.
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.91_27
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
711
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase__( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : List[Any]) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase) }
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_focalnet"] = [
        "FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FocalNetForImageClassification",
        "FocalNetForMaskedImageModeling",
        "FocalNetBackbone",
        "FocalNetModel",
        "FocalNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_focalnet import (
            FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FocalNetBackbone,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetModel,
            FocalNetPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so the heavy imports only run on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
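# The same deferred-import behavior can be had with only the standard library
# via PEP 562 module-level __getattr__; a minimal sketch for a package with a
# configuration_focalnet submodule (the mapping shown is illustrative):
import importlib

_SUBMODULE_OF = {"FocalNetConfig": "configuration_focalnet"}


def __getattr__(name):
    # Import the owning submodule only on first attribute access.
    if name in _SUBMODULE_OF:
        module = importlib.import_module(f".{_SUBMODULE_OF[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")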
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class UpperCAmelCase__: '''simple docstring''' def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = block_sizes lowercase__ = num_decoder_layers lowercase__ = d_model lowercase__ = n_head lowercase__ = d_head lowercase__ = d_inner lowercase__ = hidden_act lowercase__ = hidden_dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = 2 lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope lowercase__ = initializer_std # Used in the tests to check the size of the first attention layer lowercase__ = n_head # Used in the tests to check the size of the first hidden state lowercase__ = self.d_model # Used in the tests to check the number of output hidden states/attentions lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: lowercase__ = self.num_hidden_layers + 2 def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase__ = ids_tensor([self.batch_size] , self.num_choices) lowercase__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int: """simple docstring""" lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) lowercase__ = [input_ids, input_mask] lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) lowercase__ = False lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) lowercase__ = False lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) lowercase__ = [input_ids, input_mask] lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) lowercase__ = False lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model)) lowercase__ = False lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) 
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str: """simple docstring""" lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]: """simple docstring""" lowercase__ = self.num_choices lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" lowercase__ = 
TFFunnelForQuestionAnswering(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) = config_and_inputs lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : int = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) A : Dict = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) A : Optional[int] = False A : Optional[int] = False def UpperCAmelCase ( self : Tuple) -> str: """simple docstring""" lowercase__ = TFFunnelModelTester(self) lowercase__ = ConfigTester(self , config_class=lowerCAmelCase) def UpperCAmelCase ( self : int) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase) def UpperCAmelCase ( self : int) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase) def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase) @require_tf class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) A : List[str] = False A : int = False def UpperCAmelCase ( self : Any) -> List[Any]: """simple docstring""" lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase) lowercase__ = ConfigTester(self , config_class=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*lowerCAmelCase) def 
UpperCAmelCase ( self : int) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
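# The ids_tensor / random_attention_mask helpers used throughout these testers
# come from the shared test utilities; a minimal TF-only stand-in (names and
# seeds here are illustrative, not the library's own implementation):
import tensorflow as tf


def ids_tensor(shape, vocab_size, seed=42):
    # Uniformly sample token ids in [0, vocab_size).
    rng = tf.random.Generator.from_seed(seed)
    return rng.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)


def random_attention_mask(shape, seed=42):
    mask = ids_tensor(shape, vocab_size=2, seed=seed)
    # Make sure at least one token per row is attendable.
    return tf.concat([mask[:, :-1], tf.ones_like(mask[:, -1:])], axis=-1)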
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)

# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)
config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
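# Worked example: a 25,000 loan at 12% per annum over 3 years gives a monthly
# rate of 0.01 and 36 payments, i.e. an installment of roughly 830.36.
emi = equated_monthly_installments(principal=25_000, rate_per_annum=0.12, years_to_repay=3)
print(f"{emi:.2f}")  # 830.36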
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() a__ : Dict = logging.get_logger(__name__) def _lowerCAmelCase ( A__ ): lowercase__ = DPTConfig() if "large" in checkpoint_url: lowercase__ = 1_024 lowercase__ = 4_096 lowercase__ = 24 lowercase__ = 16 lowercase__ = [5, 11, 17, 23] lowercase__ = [256, 512, 1_024, 1_024] lowercase__ = (1, 384, 384) if "ade" in checkpoint_url: lowercase__ = True lowercase__ = 150 lowercase__ = """huggingface/label-files""" lowercase__ = """ade20k-id2label.json""" lowercase__ = json.load(open(cached_download(hf_hub_url(A__ , A__ , repo_type='dataset' ) ) , 'r' ) ) lowercase__ = {int(A__ ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} lowercase__ = [1, 150, 480, 480] return config, expected_shape def _lowerCAmelCase ( A__ ): lowercase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""] for k in ignore_keys: state_dict.pop(A__ , A__ ) def _lowerCAmelCase ( A__ ): if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowercase__ = name.replace('pretrained.model' , 'dpt.encoder' ) if "pretrained.model" in name: lowercase__ = name.replace('pretrained.model' , 'dpt.embeddings' ) if "patch_embed" in name: lowercase__ = name.replace('patch_embed' , 'patch_embeddings' ) if "pos_embed" in name: lowercase__ = name.replace('pos_embed' , 'position_embeddings' ) if "attn.proj" in name: lowercase__ = name.replace('attn.proj' , 'attention.output.dense' ) if "proj" in name and "project" not in name: lowercase__ = name.replace('proj' , 'projection' ) if "blocks" in name: lowercase__ = name.replace('blocks' , 'layer' ) if "mlp.fc1" in name: lowercase__ = name.replace('mlp.fc1' , 'intermediate.dense' ) if "mlp.fc2" in name: lowercase__ = name.replace('mlp.fc2' , 'output.dense' ) if "norm1" in name: lowercase__ = name.replace('norm1' , 'layernorm_before' ) if "norm2" in name: lowercase__ = name.replace('norm2' , 'layernorm_after' ) if "scratch.output_conv" in name: lowercase__ = name.replace('scratch.output_conv' , 'head' ) if "scratch" in name: lowercase__ = name.replace('scratch' , 'neck' ) if "layer1_rn" in name: lowercase__ = name.replace('layer1_rn' , 'convs.0' ) if "layer2_rn" in name: lowercase__ = name.replace('layer2_rn' , 'convs.1' ) if "layer3_rn" in name: lowercase__ = name.replace('layer3_rn' , 'convs.2' ) if "layer4_rn" in name: lowercase__ = name.replace('layer4_rn' , 'convs.3' ) if "refinenet" in name: lowercase__ = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowercase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' ) if "out_conv" in name: lowercase__ = name.replace('out_conv' , 'projection' ) if "resConfUnit1" in name: lowercase__ = name.replace('resConfUnit1' , 'residual_layer1' ) if "resConfUnit2" in name: lowercase__ = name.replace('resConfUnit2' , 'residual_layer2' ) if "conv1" in name: lowercase__ = name.replace('conv1' , 'convolution1' ) if "conv2" in name: lowercase__ = name.replace('conv2' , 'convolution2' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowercase__ = 
name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' ) if "pretrained.act_postprocess2.0.project.0" in name: lowercase__ = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' ) if "pretrained.act_postprocess3.0.project.0" in name: lowercase__ = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' ) if "pretrained.act_postprocess4.0.project.0" in name: lowercase__ = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowercase__ = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' ) if "pretrained.act_postprocess1.4" in name: lowercase__ = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' ) if "pretrained.act_postprocess2.3" in name: lowercase__ = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' ) if "pretrained.act_postprocess2.4" in name: lowercase__ = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' ) if "pretrained.act_postprocess3.3" in name: lowercase__ = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' ) if "pretrained.act_postprocess4.3" in name: lowercase__ = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' ) if "pretrained.act_postprocess4.4" in name: lowercase__ = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' ) if "pretrained" in name: lowercase__ = name.replace('pretrained' , 'dpt' ) if "bn" in name: lowercase__ = name.replace('bn' , 'batch_norm' ) if "head" in name: lowercase__ = name.replace('head' , 'head.head' ) if "encoder.norm" in name: lowercase__ = name.replace('encoder.norm' , 'layernorm' ) if "auxlayer" in name: lowercase__ = name.replace('auxlayer' , 'auxiliary_head.head' ) return name def _lowerCAmelCase ( A__ , A__ ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' ) lowercase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase__ = in_proj_weight[: config.hidden_size, :] lowercase__ = in_proj_bias[: config.hidden_size] lowercase__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase__ = in_proj_weight[ -config.hidden_size :, : ] lowercase__ = in_proj_bias[-config.hidden_size :] def _lowerCAmelCase ( ): lowercase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase__ = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( A__ , A__ , A__ , A__ ): lowercase__ = get_dpt_config(A__ ) # load original state_dict from URL lowercase__ = torch.hub.load_state_dict_from_url(A__ , map_location='cpu' ) # remove certain keys remove_ignore_keys_(A__ ) # rename keys for key in state_dict.copy().keys(): lowercase__ = state_dict.pop(A__ ) lowercase__ = val # read in qkv matrices read_in_q_k_v(A__ , A__ ) # load HuggingFace model lowercase__ = DPTForSemanticSegmentation(A__ ) if """ade""" in checkpoint_url else DPTForDepthEstimation(A__ ) 
model.load_state_dict(A__ ) model.eval() # Check outputs on an image lowercase__ = 480 if """ade""" in checkpoint_url else 384 lowercase__ = DPTImageProcessor(size=A__ ) lowercase__ = prepare_img() lowercase__ = image_processor(A__ , return_tensors='pt' ) # forward pass lowercase__ = model(**A__ ).logits if """ade""" in checkpoint_url else model(**A__ ).predicted_depth # Assert logits lowercase__ = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] ) if "ade" in checkpoint_url: lowercase__ = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] ) assert outputs.shape == torch.Size(A__ ) assert ( torch.allclose(outputs[0, 0, :3, :3] , A__ , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , A__ ) ) Path(A__ ).mkdir(exist_ok=A__ ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(A__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A__ ) if push_to_hub: print('Pushing model to hub...' ) model.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=A__ , ) image_processor.push_to_hub( repo_path_or_name=Path(A__ , A__ ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=A__ , ) if __name__ == "__main__": a__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt", type=str, help="URL of the original DPT checkpoint you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model directory.", ) parser.add_argument( "--push_to_hub", action="store_true", ) parser.add_argument( "--model_name", default="dpt-large", type=str, help="Name of the model, in case you're pushing to the hub.", ) a__ : int = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
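# Once converted (and optionally pushed), the checkpoint can be exercised
# through the high-level pipeline API; a short sketch, assuming the
# "Intel/dpt-large" hub id for the depth checkpoint:
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")  # PIL image of the predicted depth map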
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
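# Worked example: the unique residue modulo 35 satisfying x ≡ 1 (mod 5) and
# x ≡ 3 (mod 7) is 31, and both implementations agree.
print(chinese_remainder_theorem(5, 1, 7, 3))   # 31
print(chinese_remainder_theorem2(5, 1, 7, 3))  # 31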
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
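# Usage check against the classic LeetCode 979 example: a root holding three
# coins with two empty leaves needs exactly two moves.
tree = TreeNode(3, TreeNode(0), TreeNode(0))
print(distribute_coins(tree))  # 2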
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[Any] = { "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Union[str, Any] = "umt5" A : List[str] = ["past_key_values"] def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str: """simple docstring""" super().__init__( is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , ) lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_kv lowercase__ = d_ff lowercase__ = num_layers lowercase__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowercase__ = num_heads lowercase__ = relative_attention_num_buckets lowercase__ = relative_attention_max_distance lowercase__ = dropout_rate lowercase__ = layer_norm_epsilon lowercase__ = initializer_factor lowercase__ = feed_forward_proj lowercase__ = use_cache lowercase__ = self.feed_forward_proj.split('-') lowercase__ = act_info[-1] lowercase__ = act_info[0] == 'gated' if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
' '\'gated-gelu\' or \'relu\'') if feed_forward_proj == "gated-gelu": lowercase__ = 'gelu_new' @property def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" return self.d_model @property def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" return self.num_heads @property def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return self.num_layers class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]: """simple docstring""" lowercase__ = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: lowercase__ = 'past_encoder_sequence + sequence' lowercase__ = {0: 'batch'} lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: lowercase__ = {0: 'batch', 1: 'decoder_sequence'} lowercase__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs') return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def UpperCAmelCase ( self : int) -> int: """simple docstring""" return 13 @property def UpperCAmelCase ( self : Optional[Any]) -> float: """simple docstring""" return 5E-4
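# The feed_forward_proj handling above reduces to a small string-split rule;
# a standalone sketch (the function name is chosen here for illustration):
def parse_feed_forward_proj(feed_forward_proj: str) -> tuple[str, bool]:
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]  # activation used by the dense layers
    is_gated_act = act_info[0] == "gated"  # gated variants use two input projections
    return dense_act_fn, is_gated_act


assert parse_feed_forward_proj("gated-gelu") == ("gelu", True)
assert parse_feed_forward_proj("relu") == ("relu", False)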
from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
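# Because every symbol is re-exported here, legacy import paths keep working;
# a quick demonstration (both names resolve to the same object):
from transformers.file_utils import ModelOutput as legacy_model_output
from transformers.utils import ModelOutput

assert legacy_model_output is ModelOutput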
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : str = XGLMTokenizer A : List[Any] = XGLMTokenizerFast A : int = True A : Optional[Any] = True def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = '<pad>' lowercase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase) def UpperCAmelCase ( self : str) -> List[str]: """simple docstring""" lowercase__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(len(lowerCAmelCase) , 10_08) def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_08) def UpperCAmelCase ( self : Optional[Any]) -> List[str]: """simple docstring""" lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase) lowercase__ = tokenizer.tokenize('This is a test') self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase) self.assertListEqual( lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase) self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" return XGLMTokenizer.from_pretrained('facebook/xglm-564M') def UpperCAmelCase ( self : Optional[int]) -> Dict: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCAmelCase , f.name) lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase) lowercase__ = pickle.dumps(lowerCAmelCase) pickle.loads(lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> str: 
"""simple docstring""" if not self.test_rust_tokenizer: return lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = 'I was born in 92000, and this is falsé.' lowercase__ = tokenizer.tokenize(lowerCAmelCase) lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) @slow def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" lowercase__ = 'Hello World!' lowercase__ = [2, 3_12_27, 44_47, 35] self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase)) @slow def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" lowercase__ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35] # fmt: on self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase)) @slow def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = { 'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class UpperCAmelCase__: '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any=14 , lowerCAmelCase : List[Any]=7 , lowerCAmelCase : int=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=99 , lowerCAmelCase : int=32 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : str=4 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Union[str, Any]=37 , lowerCAmelCase : Any="gelu" , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : List[Any]=0.1 , lowerCAmelCase : Any=5_12 , lowerCAmelCase : List[str]=0.02 , ) -> List[Any]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = hidden_size lowercase__ = rotary_dim lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = initializer_range lowercase__ = None lowercase__ = vocab_size - 1 lowercase__ = vocab_size - 1 lowercase__ = vocab_size - 1 def UpperCAmelCase ( self : List[str]) -> Tuple: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() lowercase__, lowercase__, lowercase__ = config_and_inputs lowercase__ = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : str) -> Optional[int]: """simple docstring""" lowercase__ = 20 lowercase__ = model_class_name(lowerCAmelCase) lowercase__ = model.init_cache(input_ids.shape[0] , lowerCAmelCase) lowercase__ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4') lowercase__ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ = model( input_ids[:, 
:-1] , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , position_ids=lowerCAmelCase , ) lowercase__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4') lowercase__ = model( input_ids[:, -1:] , attention_mask=lowerCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=lowerCAmelCase , ) lowercase__ = model(lowerCAmelCase) lowercase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''') def UpperCAmelCase ( self : Dict , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict) -> Tuple: """simple docstring""" lowercase__ = 20 lowercase__ = model_class_name(lowerCAmelCase) lowercase__ = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))] , axis=-1 , ) lowercase__ = model.init_cache(input_ids.shape[0] , lowerCAmelCase) lowercase__ = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1)[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1)) lowercase__ = model( input_ids[:, :-1] , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , position_ids=lowerCAmelCase , ) lowercase__ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4') lowercase__ = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCAmelCase , position_ids=lowerCAmelCase , ) lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase) lowercase__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1E-3 , msg=f'''Max diff is {diff}''') @require_flax class UpperCAmelCase__( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () A : Any = (FlaxGPTJForCausalLM,) if is_flax_available() else () def UpperCAmelCase ( self : Optional[Any]) -> int: """simple docstring""" lowercase__ = FlaxGPTJModelTester(self) def UpperCAmelCase ( self : Tuple) -> str: """simple docstring""" for model_class_name in self.all_model_classes: lowercase__, lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase) def UpperCAmelCase ( self : Tuple) -> Optional[int]: """simple docstring""" for model_class_name in self.all_model_classes: lowercase__, lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase) @tooslow def UpperCAmelCase ( self : int) -> str: """simple docstring""" lowercase__ = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left') lowercase__ = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=lowerCAmelCase , truncation=lowerCAmelCase) lowercase__ = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B') lowercase__ = False lowercase__ = model.config.eos_token_id lowercase__ = jax.jit(model.generate) lowercase__ = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id).sequences lowercase__ = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase) lowercase__ = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 
'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(lowerCAmelCase , lowerCAmelCase) @is_pt_flax_cross_test def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase) lowercase__ = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ = getattr(lowerCAmelCase , lowerCAmelCase) lowercase__, lowercase__ = pt_inputs['input_ids'].shape lowercase__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(lowerCAmelCase): lowercase__ = 0 lowercase__ = 1 lowercase__ = 0 lowercase__ = 1 lowercase__ = pt_model_class(lowerCAmelCase).eval() lowercase__ = model_class(lowerCAmelCase , dtype=jnp.floataa) lowercase__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase) lowercase__ = fx_state with torch.no_grad(): lowercase__ = pt_model(**lowerCAmelCase).to_tuple() lowercase__ = fx_model(**lowerCAmelCase).to_tuple() self.assertEqual(len(lowerCAmelCase) , len(lowerCAmelCase) , 'Output lengths differ between Flax and PyTorch') for fx_output, pt_output in zip(lowerCAmelCase , lowerCAmelCase): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(lowerCAmelCase) lowercase__ = model_class.from_pretrained(lowerCAmelCase , from_pt=lowerCAmelCase) lowercase__ = fx_model_loaded(**lowerCAmelCase).to_tuple() self.assertEqual( len(lowerCAmelCase) , len(lowerCAmelCase) , 'Output lengths differ between Flax and PyTorch') for fx_output_loaded, pt_output in zip(lowerCAmelCase , lowerCAmelCase): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2) @is_pt_flax_cross_test def UpperCAmelCase ( self : List[Any]) -> List[str]: """simple docstring""" lowercase__, lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # prepare inputs lowercase__ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase) lowercase__ = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowercase__ = model_class.__name__[4:] # Skip the "Flax" at the beginning lowercase__ = getattr(lowerCAmelCase , lowerCAmelCase) lowercase__ = pt_model_class(lowerCAmelCase).eval() lowercase__ = model_class(lowerCAmelCase , dtype=jnp.floataa) lowercase__ = load_flax_weights_in_pytorch_model(lowerCAmelCase , fx_model.params) lowercase__, lowercase__ = pt_inputs['input_ids'].shape lowercase__ = np.random.randint(0 , seq_length - 1 , size=(batch_size,)) for batch_idx, start_index in enumerate(lowerCAmelCase): lowercase__ = 0 lowercase__ = 1 lowercase__ = 0 lowercase__ = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowercase__ = pt_model(**lowerCAmelCase).to_tuple() lowercase__ = fx_model(**lowerCAmelCase).to_tuple() self.assertEqual(len(lowerCAmelCase) , len(lowerCAmelCase) , 'Output lengths differ between Flax and PyTorch') for fx_output, pt_output in zip(lowerCAmelCase , lowerCAmelCase): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, 
-1].numpy() , 4E-2) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(lowerCAmelCase) lowercase__ = pt_model_class.from_pretrained(lowerCAmelCase , from_flax=lowerCAmelCase) with torch.no_grad(): lowercase__ = pt_model_loaded(**lowerCAmelCase).to_tuple() self.assertEqual( len(lowerCAmelCase) , len(lowerCAmelCase) , 'Output lengths differ between Flax and PyTorch') for fx_output, pt_output in zip(lowerCAmelCase , lowerCAmelCase): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2) @tooslow def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" for model_class_name in self.all_model_classes: lowercase__ = model_class_name.from_pretrained('EleutherAI/gpt-j-6B') lowercase__ = model(np.ones((1, 1))) self.assertIsNotNone(lowerCAmelCase)
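# The position-id bookkeeping these cache tests rely on can be seen in
# isolation; a short sketch of the broadcasting used to build per-token
# positions for the prefix pass:
import jax.numpy as jnp

input_ids = jnp.ones((2, 7), dtype="i4")  # (batch, seq_len)

# One 0..seq_len-2 range, repeated for every batch row — the same shape the
# cache tests above feed as position_ids for input_ids[:, :-1].
position_ids = jnp.broadcast_to(
    jnp.arange(input_ids.shape[-1] - 1)[None, :],
    (input_ids.shape[0], input_ids.shape[-1] - 1),
)
print(position_ids.shape)  # (2, 6)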
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    # Contains the whole SHA-1 pipeline: padding, block splitting, expansion,
    # and the 80-round compression loop.
    def __init__(self, data: bytes) -> None:
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        # 32-bit left rotation
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self) -> list:
        return [self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)]

    def expand_block(self, block: bytes) -> list:
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
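# Sanity check against the classic SHA-1 test vector:
assert SHA1Hash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"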
from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class UpperCAmelCase__( UpperCAmelCase__ , UpperCAmelCase__ ): '''simple docstring''' A : int = "pixel_values" A : Dict = False A : Union[str, Any] = TimmBackboneConfig def __init__( self : List[str] , lowerCAmelCase : Dict , **lowerCAmelCase : str) -> Dict: """simple docstring""" requires_backends(self , 'timm') super().__init__(lowerCamelCase__) lowercase__ = config if config.backbone is None: raise ValueError('backbone is not set in the config. Please set it to a timm model name.') if config.backbone not in timm.list_models(): raise ValueError(f'''backbone {config.backbone} is not supported by timm.''') if hasattr(lowerCamelCase__ , 'out_features') and config.out_features is not None: raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.') lowercase__ = getattr(lowerCamelCase__ , 'use_pretrained_backbone' , lowerCamelCase__) if pretrained is None: raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.') # We just take the final layer by default. This matches the default for the transformers models. lowercase__ = config.out_indices if getattr(lowerCamelCase__ , 'out_indices' , lowerCamelCase__) is not None else (-1,) lowercase__ = timm.create_model( config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
lowercase__ = self._backbone.return_layers lowercase__ = {layer["module"]: str(lowerCamelCase__) for i, layer in enumerate(self._backbone.feature_info.info)} super()._init_backbone(lowerCamelCase__) @classmethod def UpperCAmelCase ( cls : List[str] , lowerCAmelCase : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any]) -> Optional[Any]: """simple docstring""" requires_backends(cls , ['vision', 'timm']) from ...models.timm_backbone import TimmBackboneConfig lowercase__ = kwargs.pop('config' , TimmBackboneConfig()) lowercase__ = kwargs.pop('use_timm_backbone' , lowerCamelCase__) if not use_timm: raise ValueError('use_timm_backbone must be True for timm backbones') lowercase__ = kwargs.pop('num_channels' , config.num_channels) lowercase__ = kwargs.pop('features_only' , config.features_only) lowercase__ = kwargs.pop('use_pretrained_backbone' , config.use_pretrained_backbone) lowercase__ = kwargs.pop('out_indices' , config.out_indices) lowercase__ = TimmBackboneConfig( backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , ) return super()._from_config(lowerCamelCase__ , **lowerCamelCase__) def UpperCAmelCase ( self : Tuple , lowerCAmelCase : str) -> Optional[int]: """simple docstring""" pass def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , **lowerCAmelCase : Dict) -> Union[BackboneOutput, Tuple[Tensor, ...]]: """simple docstring""" lowercase__ = return_dict if return_dict is not None else self.config.use_return_dict lowercase__ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) lowercase__ = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('Cannot output attentions for timm backbones at the moment') if output_hidden_states: # We modify the return layers to include all the stages of the backbone lowercase__ = self._all_layers lowercase__ = self._backbone(lowerCamelCase__ , **lowerCamelCase__) lowercase__ = self._return_layers lowercase__ = tuple(hidden_states[i] for i in self.out_indices) else: lowercase__ = self._backbone(lowerCamelCase__ , **lowerCamelCase__) lowercase__ = None lowercase__ = tuple(lowerCamelCase__) lowercase__ = tuple(lowerCamelCase__) if hidden_states is not None else None if not return_dict: lowercase__ = (feature_maps,) if output_hidden_states: lowercase__ = output + (hidden_states,) return output return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__)
718
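# What the timm features_only contract used by the backbone wrapper above looks
# like on its own -- a minimal sketch, assuming timm and torch are installed
# and "resnet18" appears in timm.list_models().
import timm
import torch

backbone = timm.create_model(
    "resnet18", pretrained=False, features_only=True, out_indices=(1, 2, 3)
)
feature_maps = backbone(torch.randn(1, 3, 224, 224))  # one tensor per out_index
print([tuple(f.shape) for f in feature_maps])
print(backbone.feature_info.channels())  # channel counts per returned stage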
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer a__ : List[Any] = logging.get_logger(__name__) a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart a__ : List[Any] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } a__ : int = { "facebook/bart-base": 10_24, "facebook/bart-large": 10_24, "facebook/bart-large-mnli": 10_24, "facebook/bart-large-cnn": 10_24, "facebook/bart-large-xsum": 10_24, "yjernite/bart_eli5": 10_24, } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Optional[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : int = ["input_ids", "attention_mask"] A : Any = BartTokenizer def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" super().__init__( lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase 
, sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , ) lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space: lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type')) lowercase__ = add_prefix_space lowercase__ = pre_tok_class(**lowerCAmelCase) lowercase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase__ = 'post_processor' lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase) if tokenizer_component_instance: lowercase__ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ = tuple(state['sep']) if "cls" in state: lowercase__ = tuple(state['cls']) lowercase__ = False if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space: lowercase__ = add_prefix_space lowercase__ = True if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets: lowercase__ = trim_offsets lowercase__ = True if changes_to_apply: lowercase__ = getattr(lowerCAmelCase , state.pop('type')) lowercase__ = component_class(**lowerCAmelCase) setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase) @property def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]: """simple docstring""" lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value lowercase__ = value def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase) def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase) return tuple(lowerCAmelCase) def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple: """simple docstring""" lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , 
lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
642
0
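# The single- and pair-sequence layouts produced by
# build_inputs_with_special_tokens above, spelled out with toy ids -- a sketch
# assuming the standard BART special-token ids bos=0, eos=2.
bos, eos = 0, 2
tokens_a, tokens_b = [31414], [232]
single = [bos] + tokens_a + [eos]         # <s> A </s>
pair = single + [eos] + tokens_b + [eos]  # <s> A </s></s> B </s>
print(single, pair)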
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger a__ : int = "<<<<<<< This should probably be modified because it mentions: " a__ : Optional[int] = "=======\n>>>>>>>\n" a__ : int = [ "TextEncoderConfig", "ByteTextEncoder", "SubwordTextEncoder", "encoder_config", "maybe_build_from_corpus", "manual_dir", ] a__ : str = [ # (pattern, replacement) # Order is important here for some replacements (R"tfds\.core", R"datasets"), (R"tf\.io\.gfile\.GFile", R"open"), (R"tf\.([\w\d]+)", R"datasets.Value(\'\1\')"), (R"tfds\.features\.Text\(\)", R"datasets.Value(\'string\')"), (R"tfds\.features\.Text\(", R"datasets.Value(\'string\'),"), (R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("), (R"tfds\.features\.FeaturesDict\(", R"dict("), (R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"), (R"tfds\.", R"datasets."), (R"dl_manager\.manual_dir", R"self.config.data_dir"), (R"self\.builder_config", R"self.config"), ] def _lowerCAmelCase ( A__ ): return ConvertCommand(args.tfds_path , args.datasets_directory ) class UpperCAmelCase__( __a ): @staticmethod def UpperCAmelCase ( lowerCAmelCase : Tuple) -> Dict: """simple docstring""" lowercase__ = parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='Path to the HuggingFace Datasets folder.') train_parser.set_defaults(func=lowerCAmelCase_) def __init__( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , *lowerCAmelCase : Dict) -> Union[str, Any]: """simple docstring""" lowercase__ = get_logger('datasets-cli/converting') lowercase__ = tfds_path lowercase__ = datasets_directory def UpperCAmelCase ( self : Any) -> List[str]: """simple docstring""" if os.path.isdir(self._tfds_path): lowercase__ = os.path.abspath(self._tfds_path) elif os.path.isfile(self._tfds_path): lowercase__ = os.path.dirname(self._tfds_path) else: raise ValueError('--tfds_path is neither a directory nor a file. 
Please check path.') lowercase__ = os.path.abspath(self._datasets_directory) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''') lowercase__ = [] lowercase__ = [] lowercase__ = {} if os.path.isdir(self._tfds_path): lowercase__ = os.listdir(lowerCAmelCase_) else: lowercase__ = [os.path.basename(self._tfds_path)] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''') lowercase__ = os.path.join(lowerCAmelCase_ , lowerCAmelCase_) lowercase__ = os.path.join(lowerCAmelCase_ , lowerCAmelCase_) if not os.path.isfile(lowerCAmelCase_) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file') continue with open(lowerCAmelCase_ , encoding='utf-8') as f: lowercase__ = f.readlines() lowercase__ = [] lowercase__ = False lowercase__ = False lowercase__ = [] for line in lines: lowercase__ = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__ = 'import datasets\n' elif "import tensorflow" in out_line: # order is important here lowercase__ = '' continue elif "from absl import logging" in out_line: lowercase__ = 'from datasets import logging\n' elif "getLogger" in out_line: lowercase__ = out_line.replace('getLogger' , 'get_logger') elif any(expression in out_line for expression in TO_HIGHLIGHT): lowercase__ = True lowercase__ = list(filter(lambda lowerCAmelCase: e in out_line , lowerCAmelCase_)) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase_) + '\n') out_lines.append(lowerCAmelCase_) out_lines.append(lowerCAmelCase_) continue else: for pattern, replacement in TO_CONVERT: lowercase__ = re.sub(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__ = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase_) tfds_imports.extend(imp.strip() for imp in match.group(1).split(',')) lowercase__ = 'from . import ' + match.group(1) # Check we have not forget anything if "tf." in out_line or "tfds." 
in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''') if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__ = True out_lines.append(lowerCAmelCase_) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__ = f_name.replace('.py' , '') lowercase__ = os.path.join(lowerCAmelCase_ , lowerCAmelCase_) lowercase__ = os.path.join(lowerCAmelCase_ , lowerCAmelCase_) os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_) self._logger.info(f'''Adding directory {output_dir}''') imports_to_builder_map.update({imp: output_dir for imp in tfds_imports}) else: # Utilities will be moved at the end utils_files.append(lowerCAmelCase_) if needs_manual_update: with_manual_update.append(lowerCAmelCase_) with open(lowerCAmelCase_ , 'w' , encoding='utf-8') as f: f.writelines(lowerCAmelCase_) self._logger.info(f'''Converted in {output_file}''') for utils_file in utils_files: try: lowercase__ = os.path.basename(lowerCAmelCase_) lowercase__ = imports_to_builder_map[f_name.replace('.py' , '')] self._logger.info(f'''Moving {dest_folder} to {utils_file}''') shutil.copy(lowerCAmelCase_ , lowerCAmelCase_) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. Please copy manually.''') if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''')
719
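# One pass of the TO_CONVERT rules above applied by hand to a single tfds
# line -- a sketch showing how the pattern/replacement pairs compose.
import re

line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
line = re.sub(
    r"features\s*=\s*tfds.features.FeaturesDict\(", "features=datasets.Features(", line
)
line = re.sub(r"tfds\.features\.Text\(\)", "datasets.Value('string')", line)
print(line)  # features=datasets.Features({'text': datasets.Value('string')})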
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : str = (DDIMParallelScheduler,) A : Any = (("eta", 0.0), ("num_inference_steps", 50)) def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict: """simple docstring""" lowercase__ = { 'num_train_timesteps': 10_00, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**lowerCAmelCase) return config def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**lowerCAmelCase) lowercase__ = scheduler_class(**lowerCAmelCase) lowercase__, lowercase__ = 10, 0.0 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase) for t in scheduler.timesteps: lowercase__ = model(lowerCAmelCase , lowerCAmelCase) lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample return sample def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowerCAmelCase) lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(steps_offset=1) lowercase__ = scheduler_class(**lowerCAmelCase) scheduler.set_timesteps(5) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1])) def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]): self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> str: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> int: """simple docstring""" for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=lowerCAmelCase) def UpperCAmelCase ( self : Any) -> List[str]: """simple docstring""" for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , ) def UpperCAmelCase ( self : int) -> Optional[Any]: """simple docstring""" for t in [1, 10, 49]: self.check_over_forward(time_step=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> int: """simple docstring""" for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]): self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase) def 
UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]): self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCAmelCase) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5 def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCAmelCase) lowercase__, lowercase__ = 10, 0.0 scheduler.set_timesteps(lowerCAmelCase) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter lowercase__ = self.dummy_sample_deter + 0.1 lowercase__ = self.dummy_sample_deter - 0.1 lowercase__ = samplea.shape[0] lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0) lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase) lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 11_47.79_04) < 1E-2 assert abs(result_mean.item() - 0.49_82) < 1E-3 def UpperCAmelCase ( self : Any) -> int: """simple docstring""" lowercase__ = self.full_loop() lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_72.00_67) < 1E-2 assert abs(result_mean.item() - 0.22_39_67) < 1E-3 def UpperCAmelCase ( self : int) -> List[Any]: """simple docstring""" lowercase__ = self.full_loop(prediction_type='v_prediction') lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 52.53_02) < 1E-2 assert abs(result_mean.item() - 0.06_84) < 1E-3 def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_49.82_95) < 1E-2 assert abs(result_mean.item() - 0.19_51) < 1E-3 def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_49.07_84) < 1E-2 assert abs(result_mean.item() - 0.19_41) < 1E-3
642
0
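# The deterministic DDIM update (eta = 0) that the scheduler under test
# implements -- a toy numpy sketch of the math, not the diffusers code path.
# The beta range matches the config above (0.0001 to 0.02, linear schedule).
import numpy as np

alphas_cumprod = np.cumprod(1.0 - np.linspace(1e-4, 0.02, 1000))


def ddim_step(x_t, eps, t, t_prev):
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[t_prev] if t_prev >= 0 else 1.0
    pred_x0 = (x_t - np.sqrt(1.0 - a_t) * eps) / np.sqrt(a_t)  # predicted clean sample
    return np.sqrt(a_prev) * pred_x0 + np.sqrt(1.0 - a_prev) * eps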
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTConfig, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a__ : Tuple = logging.get_logger(__name__) def _lowerCAmelCase ( A__ ): lowercase__ = MobileViTConfig() # size of the architecture if "mobilevit_s" in mobilevit_name: lowercase__ = [144, 192, 240] lowercase__ = [16, 32, 64, 96, 128, 160, 640] elif "mobilevit_xs" in mobilevit_name: lowercase__ = [96, 120, 144] lowercase__ = [16, 32, 48, 64, 80, 96, 384] elif "mobilevit_xxs" in mobilevit_name: lowercase__ = [64, 80, 96] lowercase__ = [16, 16, 24, 48, 64, 80, 320] lowercase__ = 0.05 lowercase__ = 2.0 if mobilevit_name.startswith('deeplabv3_' ): lowercase__ = 512 lowercase__ = 16 lowercase__ = 21 lowercase__ = """pascal-voc-id2label.json""" else: lowercase__ = 1_000 lowercase__ = """imagenet-1k-id2label.json""" lowercase__ = """huggingface/label-files""" lowercase__ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) lowercase__ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} lowercase__ = idalabel lowercase__ = {v: k for k, v in idalabel.items()} return config def _lowerCAmelCase ( A__ , A__=False ): for i in range(1 , 6 ): if F'''layer_{i}.''' in name: lowercase__ = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' ) if "conv_1." in name: lowercase__ = name.replace('conv_1.' , 'conv_stem.' ) if ".block." in name: lowercase__ = name.replace('.block.' , '.' ) if "exp_1x1" in name: lowercase__ = name.replace('exp_1x1' , 'expand_1x1' ) if "red_1x1" in name: lowercase__ = name.replace('red_1x1' , 'reduce_1x1' ) if ".local_rep.conv_3x3." in name: lowercase__ = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' ) if ".local_rep.conv_1x1." in name: lowercase__ = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' ) if ".norm." in name: lowercase__ = name.replace('.norm.' , '.normalization.' ) if ".conv." in name: lowercase__ = name.replace('.conv.' , '.convolution.' ) if ".conv_proj." in name: lowercase__ = name.replace('.conv_proj.' , '.conv_projection.' ) for i in range(0 , 2 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: lowercase__ = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' ) for i in range(2 , 6 ): for j in range(0 , 4 ): if F'''.{i}.{j}.''' in name: lowercase__ = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' ) if "expand_1x1" in name: lowercase__ = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' ) if "conv_3x3" in name: lowercase__ = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' ) if "reduce_1x1" in name: lowercase__ = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' ) for i in range(2 , 5 ): if F'''.global_rep.{i}.weight''' in name: lowercase__ = name.replace(F'''.global_rep.{i}.weight''' , '.layernorm.weight' ) if F'''.global_rep.{i}.bias''' in name: lowercase__ = name.replace(F'''.global_rep.{i}.bias''' , '.layernorm.bias' ) if ".global_rep." in name: lowercase__ = name.replace('.global_rep.' , '.transformer.' ) if ".pre_norm_mha.0." in name: lowercase__ = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' ) if ".pre_norm_mha.1.out_proj." in name: lowercase__ = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' ) if ".pre_norm_ffn.0." in name: lowercase__ = name.replace('.pre_norm_ffn.0.' 
, '.layernorm_after.' ) if ".pre_norm_ffn.1." in name: lowercase__ = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' ) if ".pre_norm_ffn.4." in name: lowercase__ = name.replace('.pre_norm_ffn.4.' , '.output.dense.' ) if ".transformer." in name: lowercase__ = name.replace('.transformer.' , '.transformer.layer.' ) if ".aspp_layer." in name: lowercase__ = name.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in name: lowercase__ = name.replace('.aspp_pool.' , '.' ) if "seg_head." in name: lowercase__ = name.replace('seg_head.' , 'segmentation_head.' ) if "segmentation_head.classifier.classifier." in name: lowercase__ = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' ) if "classifier.fc." in name: lowercase__ = name.replace('classifier.fc.' , 'classifier.' ) elif (not base_model) and ("segmentation_head." not in name): lowercase__ = """mobilevit.""" + name return name def _lowerCAmelCase ( A__ , A__ , A__=False ): if base_model: lowercase__ = """""" else: lowercase__ = """mobilevit.""" for key in orig_state_dict.copy().keys(): lowercase__ = orig_state_dict.pop(UpperCamelCase__ ) if key[:8] == "encoder.": lowercase__ = key[8:] if "qkv" in key: lowercase__ = key.split('.' ) lowercase__ = int(key_split[0][6:] ) - 1 lowercase__ = int(key_split[3] ) lowercase__ = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' ) lowercase__ = layer.transformer.layer[transformer_num].attention.attention.all_head_size lowercase__ = ( F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.''' ) if "weight" in key: lowercase__ = val[:dim, :] lowercase__ = val[dim : dim * 2, :] lowercase__ = val[-dim:, :] else: lowercase__ = val[:dim] lowercase__ = val[dim : dim * 2] lowercase__ = val[-dim:] else: lowercase__ = val return orig_state_dict def _lowerCAmelCase ( ): lowercase__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase__ = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def _lowerCAmelCase ( A__ , A__ , A__ , A__=False ): lowercase__ = get_mobilevit_config(UpperCamelCase__ ) # load original state_dict lowercase__ = torch.load(UpperCamelCase__ , map_location='cpu' ) # load 🤗 model if mobilevit_name.startswith('deeplabv3_' ): lowercase__ = MobileViTForSemanticSegmentation(UpperCamelCase__ ).eval() else: lowercase__ = MobileViTForImageClassification(UpperCamelCase__ ).eval() lowercase__ = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # Check outputs on an image, prepared by MobileViTImageProcessor lowercase__ = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowercase__ = image_processor(images=prepare_img() , return_tensors='pt' ) lowercase__ = model(**UpperCamelCase__ ) lowercase__ = outputs.logits if mobilevit_name.startswith('deeplabv3_' ): assert logits.shape == (1, 21, 32, 32) if mobilevit_name == "deeplabv3_mobilevit_s": lowercase__ = torch.tensor( [ [[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]], [[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]], [[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xs": lowercase__ = torch.tensor( [ [[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]], [[-9.44_23, -9.77_66, -9.67_14], 
[-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]], [[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]], ] ) elif mobilevit_name == "deeplabv3_mobilevit_xxs": lowercase__ = torch.tensor( [ [[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]], [[-10.5_536, -10.2_332, -10.2_924], [-10.2_336, -9.86_24, -9.59_64], [-10.8_840, -10.8_158, -10.6_659]], [[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]], ] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) else: assert logits.shape == (1, 1_000) if mobilevit_name == "mobilevit_s": lowercase__ = torch.tensor([-0.98_66, 0.23_92, -1.12_41] ) elif mobilevit_name == "mobilevit_xs": lowercase__ = torch.tensor([-2.47_61, -0.93_99, -1.95_87] ) elif mobilevit_name == "mobilevit_xxs": lowercase__ = torch.tensor([-1.93_64, -1.23_27, -0.46_53] ) else: raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' ) assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(UpperCamelCase__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: lowercase__ = { """mobilevit_s""": """mobilevit-small""", """mobilevit_xs""": """mobilevit-x-small""", """mobilevit_xxs""": """mobilevit-xx-small""", """deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""", """deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""", """deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""", } print('Pushing to the hub...' ) lowercase__ = model_mapping[mobilevit_name] image_processor.push_to_hub(UpperCamelCase__ , organization='apple' ) model.push_to_hub(UpperCamelCase__ , organization='apple' ) if __name__ == "__main__": a__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( "--mobilevit_name", default="mobilevit_s", type=str, help=( "Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\'," " \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'." ), ) parser.add_argument( "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) a__ : int = parser.parse_args() convert_movilevit_checkpoint( args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
720
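# The renaming logic above traced on one checkpoint key -- a hand-worked
# sketch of the successive string substitutions (the key itself is
# illustrative, not taken from a real checkpoint).
name = "layer_1.conv_1.block.norm.weight"
name = name.replace("layer_1.", "encoder.layer.0.")  # layer_{i} -> encoder.layer.{i-1}
name = name.replace("conv_1.", "conv_stem.")         # stem convolution
name = name.replace(".block.", ".")                  # drop the .block. level
name = name.replace(".norm.", ".normalization.")     # norm -> normalization
print(name)  # encoder.layer.0.conv_stem.normalization.weight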
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: Harris free parameter, usually in [0.04, 0.06]
        window_size: neighbourhood size considered for corner detection
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        """Returns the image with detected corners marked in red, and the corner list."""
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - self.k * (trace**2)
                # Can change the threshold value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
642
0
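# The same Harris response computed without the per-pixel Python loops -- a
# vectorized numpy sketch using a uniform window (cv2.boxFilter sums over the
# neighbourhood; this is an alternative formulation, not the class above).
import cv2
import numpy as np


def harris_response(gray: np.ndarray, k: float = 0.04, win: int = 3) -> np.ndarray:
    dy, dx = np.gradient(gray.astype(np.float64))
    wxx = cv2.boxFilter(dx * dx, -1, (win, win), normalize=False)
    wyy = cv2.boxFilter(dy * dy, -1, (win, win), normalize=False)
    wxy = cv2.boxFilter(dx * dy, -1, (win, win), normalize=False)
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2  # corners where the response is large and positive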
import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: a__ : List[Any] = None a__ : Any = logging.get_logger(__name__) a__ : int = "▁" a__ : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} a__ : List[Any] = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } a__ : Optional[int] = { "google/pegasus-xsum": 5_12, } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Any = VOCAB_FILES_NAMES A : List[str] = PRETRAINED_VOCAB_FILES_MAP A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Optional[Any] = PegasusTokenizer A : List[str] = ["input_ids", "attention_mask"] def __init__( self : List[str] , lowerCAmelCase : Dict=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Optional[Any]="<pad>" , lowerCAmelCase : str="</s>" , lowerCAmelCase : Union[str, Any]="<unk>" , lowerCAmelCase : Tuple="<mask_2>" , lowerCAmelCase : int="<mask_1>" , lowerCAmelCase : str=None , lowerCAmelCase : List[str]=1_03 , **lowerCAmelCase : int , ) -> Optional[int]: """simple docstring""" lowercase__ = offset if additional_special_tokens is not None: if not isinstance(UpperCAmelCase__ , UpperCAmelCase__): raise TypeError( f'''additional_special_tokens should be of type {type(UpperCAmelCase__)}, but is''' f''' {type(UpperCAmelCase__)}''') lowercase__ = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(UpperCAmelCase__) , self.offset - 1) ] if len(set(UpperCAmelCase__)) != len(UpperCAmelCase__): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''') lowercase__ = additional_special_tokens_extended else: lowercase__ = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset)] super().__init__( UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , mask_token_sent=UpperCAmelCase__ , offset=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , ) lowercase__ = vocab_file lowercase__ = False if not self.vocab_file else True def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Tuple) -> Optional[Any]: """simple docstring""" lowercase__ = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( 'There should be 3 special tokens: mask_token, pad_token, and eos_token +' f''' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}''') return [1 if x in all_special_ids else 0 for x in seq] def UpperCAmelCase ( self : int , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(UpperCAmelCase__) elif token_ids_a is None: return self._special_token_mask(UpperCAmelCase__) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple=None) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(UpperCAmelCase__): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return lowercase__ = os.path.join( UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase__): copyfile(self.vocab_file , UpperCAmelCase__) return (out_vocab_file,)
721
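# The placeholder tokens the Pegasus constructor above fabricates -- a sketch
# of the comprehension with the default offset of 103 and no user extras.
offset = 103
extras = [f"<unk_{i}>" for i in range(2, offset)]  # <unk_2> ... <unk_102>
print(len(extras), extras[0], extras[-1])  # 101 <unk_2> <unk_102>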
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
642
0
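# Rough effect of the stride-2 convolutional subsampler that consumes
# num_conv_layers and conv_kernel_sizes above -- a sketch mirroring the
# per-layer length formula used by the model (stride 2 is fixed in the
# model itself, not a config field).
def subsampled_length(n_frames: int, num_conv_layers: int = 2) -> int:
    for _ in range(num_conv_layers):
        n_frames = (n_frames - 1) // 2 + 1
    return n_frames


print(subsampled_length(1000))  # 250 encoder positions for 1000 input frames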
def hamming(n_element: int) -> list:
    """Returns the list of the first n_element Hamming numbers (2^i * 3^j * 5^k)."""
    n_element = int(n_element)
    if n_element < 1:
        raise ValueError("n_element should be a positive number")
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5)
        )
        index += 1
    return hamming_list


if __name__ == "__main__":
    n = input("Enter the last number (nth term) of the Hamming Number Series: ")
    print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
    hamming_numbers = hamming(int(n))
    print("-----------------------------------------------------")
    print(f"The list with nth numbers is: {hamming_numbers}")
    print("-----------------------------------------------------")
700
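# The same series via a heap -- an alternative sketch that trades the three
# moving pointers above for a priority queue with de-duplication.
import heapq


def hamming_heap(n: int) -> list:
    heap, seen, out = [1], {1}, []
    while len(out) < n:
        value = heapq.heappop(heap)
        out.append(value)
        for factor in (2, 3, 5):
            if value * factor not in seen:
                seen.add(value * factor)
                heapq.heappush(heap, value * factor)
    return out


print(hamming_heap(10))  # [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]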
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[int] = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
642
0
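# The idea behind the _LazyModule indirection above, stripped to its core --
# a sketch (not the transformers implementation): attribute access triggers
# the real import, so importing the package stays cheap.
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)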
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into the feature matrix and the target vector
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
701
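# A numeric readout to complement the confusion-matrix plot above -- a
# self-contained sketch reporting held-out accuracy for the same setup.
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier

data = load_iris()
x_tr, x_te, y_tr, y_te = train_test_split(data["data"], data["target"], test_size=0.25)
clf = XGBClassifier().fit(x_tr, y_tr)
print("test accuracy:", accuracy_score(y_te, clf.predict(x_te)))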
# Imports import numpy as np class UpperCAmelCase__: '''simple docstring''' def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict: """simple docstring""" self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int: """simple docstring""" if red is not None: lowercase__ = red if green is not None: lowercase__ = green if blue is not None: lowercase__ = blue if red_edge is not None: lowercase__ = red_edge if nir is not None: lowercase__ = nir return True def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]: """simple docstring""" self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase) lowercase__ = { 'ARVI2': self.arvaa, 'CCCI': self.ccci, 'CVI': self.cvi, 'GLI': self.gli, 'NDVI': self.ndvi, 'BNDVI': self.bndvi, 'redEdgeNDVI': self.red_edge_ndvi, 'GNDVI': self.gndvi, 'GBNDVI': self.gbndvi, 'GRNDVI': self.grndvi, 'RBNDVI': self.rbndvi, 'PNDVI': self.pndvi, 'ATSAVI': self.atsavi, 'BWDRVI': self.bwdrvi, 'CIgreen': self.ci_green, 'CIrededge': self.ci_rededge, 'CI': self.ci, 'CTVI': self.ctvi, 'GDVI': self.gdvi, 'EVI': self.evi, 'GEMI': self.gemi, 'GOSAVI': self.gosavi, 'GSAVI': self.gsavi, 'Hue': self.hue, 'IVI': self.ivi, 'IPVI': self.ipvi, 'I': self.i, 'RVI': self.rvi, 'MRVI': self.mrvi, 'MSAVI': self.m_savi, 'NormG': self.norm_g, 'NormNIR': self.norm_nir, 'NormR': self.norm_r, 'NGRDI': self.ngrdi, 'RI': self.ri, 'S': self.s, 'IF': self._if, 'DVI': self.dvi, 'TVI': self.tvi, 'NDRE': self.ndre, } try: return funcs[index]() except KeyError: print('Index not in the list!') return False def UpperCAmelCase ( self : Optional[int]) -> List[str]: """simple docstring""" return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def UpperCAmelCase ( self : int) -> Any: """simple docstring""" return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def UpperCAmelCase ( self : str) -> Optional[int]: """simple docstring""" return self.nir * (self.red / (self.green**2)) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return (self.nir - self.red) / (self.nir + self.red) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" return (self.nir - self.blue) / (self.nir + self.blue) def UpperCAmelCase ( self : List[Any]) -> Optional[int]: """simple docstring""" return (self.redEdge - self.red) / (self.redEdge + self.red) def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green) def UpperCAmelCase ( self : Dict) -> int: """simple docstring""" return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def UpperCAmelCase ( self : Union[str, 
Any]) -> Optional[int]: """simple docstring""" return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def UpperCAmelCase ( self : Optional[Any]) -> Dict: """simple docstring""" return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]: """simple docstring""" return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def UpperCAmelCase ( self : int) -> Tuple: """simple docstring""" return (self.nir / self.green) - 1 def UpperCAmelCase ( self : Any) -> str: """simple docstring""" return (self.nir / self.redEdge) - 1 def UpperCAmelCase ( self : Any) -> List[str]: """simple docstring""" return (self.red - self.blue) / self.red def UpperCAmelCase ( self : Any) -> Optional[int]: """simple docstring""" lowercase__ = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2)) def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" return self.nir - self.green def UpperCAmelCase ( self : Tuple) -> List[Any]: """simple docstring""" return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red) def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green + y) def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]: """simple docstring""" return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def UpperCAmelCase ( self : str) -> int: """simple docstring""" return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple: """simple docstring""" return (self.nir - b) / (a * self.red) def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return (self.red + self.green + self.blue) / 30.5 def UpperCAmelCase ( self : int) -> str: """simple docstring""" return self.nir / self.red def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return (self.rvi() - 1) / (self.rvi() + 1) def UpperCAmelCase ( self : Optional[int]) -> Optional[int]: """simple docstring""" return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return self.green / (self.nir + self.red + self.green) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" return self.nir / (self.nir + self.red + self.green) def UpperCAmelCase ( self : List[Any]) -> Dict: """simple docstring""" 
return self.red / (self.nir + self.red + self.green) def UpperCAmelCase ( self : Optional[Any]) -> Any: """simple docstring""" return (self.green - self.red) / (self.green + self.red) def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" return (self.red - self.green) / (self.red + self.green) def UpperCAmelCase ( self : str) -> int: """simple docstring""" lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)]) lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) return (max_value - min_value) / max_value def UpperCAmelCase ( self : Optional[int]) -> Tuple: """simple docstring""" return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def UpperCAmelCase ( self : int) -> Optional[Any]: """simple docstring""" return self.nir / self.red def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" return (self.ndvi() + 0.5) ** (1 / 2) def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" return (self.nir - self.redEdge) / (self.nir + self.redEdge)
642
0
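# NDVI on toy reflectance bands -- a sketch of how the index methods above
# are used, with small numpy arrays standing in for band rasters.
import numpy as np

nir = np.array([[0.6, 0.7], [0.8, 0.5]])
red = np.array([[0.2, 0.1], [0.3, 0.4]])
ndvi = (nir - red) / (nir + red)  # values near +1 indicate dense vegetation
print(ndvi)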
'''simple docstring''' import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() a__ : Optional[int] = logging.get_logger(__name__) a__ : int = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear", "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed", "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } a__ : Dict = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ): for attribute in key.split('.' ): lowercase__ = getattr(lowercase__ , lowercase__ ) if weight_type is not None: lowercase__ = getattr(lowercase__ , lowercase__ ).shape else: lowercase__ = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase__ = value elif weight_type == "weight_g": lowercase__ = value elif weight_type == "weight_v": lowercase__ = value elif weight_type == "bias": lowercase__ = value else: lowercase__ = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def _lowerCAmelCase ( A__ , A__ ): lowercase__ = [] lowercase__ = fairseq_model.state_dict() lowercase__ = hf_model.feature_extractor for name, value in fairseq_dict.items(): lowercase__ = False if "conv_layers" in name: load_conv_layer( lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == 'group' , ) lowercase__ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: lowercase__ = True if "*" in mapped_key: lowercase__ = name.split(lowercase__ )[0].split('.' 
)[-2] lowercase__ = mapped_key.replace('*' , lowercase__ ) if "weight_g" in name: lowercase__ = 'weight_g' elif "weight_v" in name: lowercase__ = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: lowercase__ = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ = 'weight' else: lowercase__ = None set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) continue if not is_used: unused_weights.append(lowercase__ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ ): lowercase__ = full_name.split('conv_layers.' )[-1] lowercase__ = name.split('.' ) lowercase__ = int(items[0] ) lowercase__ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase__ = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) lowercase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase__ = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase__ ) @torch.no_grad() def _lowerCAmelCase ( A__ , A__ , A__=None ): lowercase__ = torch.load(lowercase__ ) lowercase__ = WavLMConfigOrig(checkpoint['cfg'] ) lowercase__ = WavLMOrig(lowercase__ ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: lowercase__ = WavLMConfig.from_pretrained(lowercase__ ) else: lowercase__ = WavLMConfig() lowercase__ = WavLMModel(lowercase__ ) recursively_load_weights(lowercase__ , lowercase__ ) hf_wavlm.save_pretrained(lowercase__ ) if __name__ == "__main__": a__ : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") a__ : List[Any] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
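# A hedged invocation sketch for the conversion script above; the filename is
# an assumption, since the dump does not name the file.
# python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path /path/to/WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-base-hf \
#     --config_path ./config.json   # optional; otherwise a fresh WavLMConfig is used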
702
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setup(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
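# Usage sketch outside the test harness; the printed label is the expected
# output for this input, not a guarantee.
from transformers import load_tool

classifier = load_tool("text-classification")
classifier.setup()
print(classifier("That's quite cool", ["positive", "negative"]))  # "positive"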
642
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
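# Instantiation sketch using the public transformers names for this configuration.
from transformers import XLMRobertaXLConfig, XLMRobertaXLModel

config = XLMRobertaXLConfig()        # defaults mirror facebook/xlm-roberta-xl
model = XLMRobertaXLModel(config)    # randomly initialized weights
print(config.hidden_size, config.num_hidden_layers)  # 2560 36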
703
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : List[Any] = None A : Optional[int] = None @property def UpperCAmelCase ( self : str) -> Union[str, Any]: """simple docstring""" return self.feat_extract_tester.prepare_feat_extract_dict() def UpperCAmelCase ( self : int) -> Any: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(lowerCAmelCase , 'feature_size')) self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate')) self.assertTrue(hasattr(lowerCAmelCase , 'padding_value')) def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name]))) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) @require_torch def UpperCAmelCase ( self : Dict) -> int: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) @require_tf def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]: """simple docstring""" def _inputs_have_equal_length(lowerCAmelCase : int): lowercase__ = len(input[0]) for input_slice in input[1:]: if len(lowerCAmelCase) != length: return False return True def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple): if len(lowerCAmelCase) != len(lowerCAmelCase): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase): if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3): 
return False return True lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = self.feat_extract_tester.seq_length_diff lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff lowercase__ = self.feat_extract_tester.min_seq_length lowercase__ = self.feat_extract_tester.batch_size lowercase__ = self.feat_extract_tester.feature_size # test padding for List[int] + numpy lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest') lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1])) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np') lowercase__ = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np') lowercase__ = input_a[input_name] self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) self.assertTrue(len(input_a[0]) == pad_min_length) self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0]))) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length)) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size) # test padding for `pad_to_multiple_of` for List[int] + numpy lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , ) lowercase__ = input_a[input_name] self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a)) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length)) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size) # Check padding value is correct lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3) self.assertTrue( abs( np.asarray(input_a[1])[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)) < 1E-3) self.assertTrue( abs( 
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)) < 1E-3) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length)) < 1E-3) def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str: """simple docstring""" def _inputs_have_equal_length(lowerCAmelCase : int): lowercase__ = len(input[0]) for input_slice in input[1:]: if len(lowerCAmelCase) != length: return False return True def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]): if len(lowerCAmelCase) != len(lowerCAmelCase): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase): if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3): return False return True lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) # truncate to smallest lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0])) lowercase__ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) # truncate to smallest with np lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np') lowercase__ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(input_a.shape[1] == len(speech_inputs[0])) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) # truncate to middle lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np') lowercase__ = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1])) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1])) # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): 
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy lowercase__ = 12 lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , ) lowercase__ = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of lowercase__ = len(speech_inputs[0]) if expected_length % pad_to_multiple_of != 0: lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0]) == expected_length) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" self._check_padding(numpify=lowerCAmelCase) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" self._check_padding(numpify=lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> int: """simple docstring""" self._check_truncation(numpify=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" self._check_truncation(numpify=lowerCAmelCase) @require_torch def UpperCAmelCase ( self : Dict) -> List[str]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name] self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2) @require_tf def UpperCAmelCase ( self : str) -> str: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name] self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2) def UpperCAmelCase ( self : Optional[Any]) -> Tuple: """simple docstring""" lowercase__ = self.feat_extract_dict lowercase__ = True lowercase__ = self.feature_extraction_class(**lowerCAmelCase) lowercase__ = 
self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = [len(lowerCAmelCase) for x in speech_inputs] lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np') self.assertIn('attention_mask' , lowerCAmelCase) self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2])) self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase) def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" lowercase__ = self.feat_extract_dict lowercase__ = True lowercase__ = self.feature_extraction_class(**lowerCAmelCase) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = [len(lowerCAmelCase) for x in speech_inputs] lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = min(lowerCAmelCase) lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np') self.assertIn('attention_mask' , lowerCAmelCase) self.assertListEqual( list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length]) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
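# A sketch of the padding behavior these tests exercise, using a concrete
# feature extractor. Wav2Vec2FeatureExtractor is chosen purely as an example;
# the tests above run against whichever extractor the subclass plugs in.
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
ragged = {"input_values": [[0.1] * 800, [0.2] * 1000, [0.3] * 1200]}  # ragged batch
padded = extractor.pad(ragged, padding="longest", return_tensors="np")
print(padded["input_values"].shape)  # (3, 1200) -- every sequence padded to the longest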
642
0
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

# Sieve of Eratosthenes over the odd candidates.
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
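# Worked check for the helpers above: 10 is the first number with five
# prime-sum partitions (10 = 2+2+2+2+2 = 2+2+3+3 = 2+3+5 = 3+7 = 5+5), and
# each partition maps to a distinct product of its primes.
print(len(partition(10)))  # 5
print(solution(4))         # 10 -- first number exceeding four prime partitions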
704
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Möbius function: 0 if n is not square-free, else (-1)**k for k prime factors."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
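# Quick sanity checks for mobius:
print(mobius(24))  # 0  (24 = 2^3 * 3 is not square-free)
print(mobius(6))   # 1  (6 = 2 * 3: square-free, even number of prime factors)
print(mobius(7))   # -1 (square-free, odd number of prime factors)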
642
0
import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a__ : Optional[Any] = logging.get_logger(__name__) a__ : str = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} a__ : Tuple = { "vocab_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json" ), }, "merges_file": { "allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt", "allenai/longformer-large-4096": ( "https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt" ), "allenai/longformer-large-4096-finetuned-triviaqa": ( "https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt" ), "allenai/longformer-base-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt" ), "allenai/longformer-large-4096-extra.pos.embd.only": ( "https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt" ), }, } a__ : Dict = { "allenai/longformer-base-4096": 40_96, "allenai/longformer-large-4096": 40_96, "allenai/longformer-large-4096-finetuned-triviaqa": 40_96, "allenai/longformer-base-4096-extra.pos.embd.only": 40_96, "allenai/longformer-large-4096-extra.pos.embd.only": 40_96, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def _lowerCAmelCase ( ): lowercase__ = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowercase__ = bs[:] lowercase__ = 0 for b in range(2**8 ): if b not in bs: bs.append(a_ ) cs.append(2**8 + n ) n += 1 lowercase__ = [chr(a_ ) for n in cs] return dict(zip(a_ , a_ ) ) def _lowerCAmelCase ( A__ ): lowercase__ = set() lowercase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ = char return pairs class UpperCAmelCase__( __SCREAMING_SNAKE_CASE ): '''simple docstring''' A : Any = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : str = ["input_ids", "attention_mask"] def __init__( self : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Dict="replace" , lowerCAmelCase : List[Any]="<s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Any="<s>" , lowerCAmelCase : List[str]="<unk>" , lowerCAmelCase : Optional[Any]="<pad>" , lowerCAmelCase : Optional[Any]="<mask>" , lowerCAmelCase : Union[str, Any]=False , **lowerCAmelCase : int , ) -> Any: """simple docstring""" lowercase__ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else bos_token lowercase__ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else eos_token lowercase__ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else sep_token lowercase__ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else cls_token lowercase__ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else unk_token lowercase__ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase__ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else mask_token super().__init__( errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , ) with open(__snake_case , encoding='utf-8' ) as vocab_handle: lowercase__ = json.load(__snake_case ) lowercase__ = {v: k for k, v in self.encoder.items()} lowercase__ = errors # how to handle errors in decoding lowercase__ = bytes_to_unicode() lowercase__ = {v: k for k, v in self.byte_encoder.items()} with open(__snake_case , encoding='utf-8' ) as merges_handle: lowercase__ = merges_handle.read().split('\n' )[1:-1] lowercase__ = [tuple(merge.split() ) for merge in bpe_merges] lowercase__ = dict(zip(__snake_case , range(len(__snake_case ) ) ) ) lowercase__ = {} lowercase__ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowercase__ = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def UpperCAmelCase ( self : Any ) -> int: """simple docstring""" return len(self.encoder ) def UpperCAmelCase ( self : Any ) -> Tuple: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Tuple ) -> Optional[Any]: """simple docstring""" if token in self.cache: return self.cache[token] lowercase__ = tuple(__snake_case ) lowercase__ = get_pairs(__snake_case ) if not pairs: return token while True: lowercase__ = min(__snake_case , key=lambda lowerCAmelCase : self.bpe_ranks.get(__snake_case , float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowercase__ = bigram lowercase__ = [] lowercase__ = 0 while i < len(__snake_case ): try: lowercase__ = word.index(__snake_case , __snake_case ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase__ = j if word[i] == first and i < len(__snake_case ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase__ = tuple(__snake_case ) lowercase__ = new_word if len(__snake_case ) == 1: break else: lowercase__ = get_pairs(__snake_case ) lowercase__ = ''' '''.join(__snake_case ) lowercase__ = word return word def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str ) -> List[Any]: """simple docstring""" lowercase__ = [] for token in re.findall(self.pat , __snake_case ): lowercase__ = ''''''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__snake_case ).split(' ' ) ) return bpe_tokens def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Tuple ) -> Tuple: """simple docstring""" return self.encoder.get(__snake_case , self.encoder.get(self.unk_token ) ) def UpperCAmelCase ( self : str , lowerCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" return self.decoder.get(__snake_case ) def UpperCAmelCase ( self : Any , lowerCAmelCase : Any ) -> str: """simple docstring""" lowercase__ = ''''''.join(__snake_case ) lowercase__ = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def UpperCAmelCase ( self : List[str] 
, lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> Dict: """simple docstring""" if not os.path.isdir(__snake_case ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase__ = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowercase__ = os.path.join( __snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(__snake_case , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__snake_case , ensure_ascii=__snake_case ) + '\n' ) lowercase__ = 0 with open(__snake_case , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) lowercase__ = token_index writer.write(' '.join(__snake_case ) + '\n' ) index += 1 return vocab_file, merge_file def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> Optional[Any]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase__ = [self.cls_token_id] lowercase__ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None , lowerCAmelCase : bool = False ) -> int: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case ) if token_ids_a is None: return [1] + ([0] * len(__snake_case )) + [1] return [1] + ([0] * len(__snake_case )) + [1, 1] + ([0] * len(__snake_case )) + [1] def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> Union[str, Any]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple=False , **lowerCAmelCase : Any ) -> int: """simple docstring""" lowercase__ = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__snake_case ) > 0 and not text[0].isspace()): lowercase__ = ''' ''' + text return (text, kwargs)
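# A small usage sketch via the standard transformers entry point; the
# checkpoint name is taken from the pretrained map above.
from transformers import LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
encoded = tokenizer("Hello world")
print(encoded["input_ids"])                    # ids with <s> ... </s> around the BPE tokens
print(tokenizer.decode(encoded["input_ids"]))  # "<s>Hello world</s>"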
705
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
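# Instantiation sketch using the public transformers names:
from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2])  # focalnet-tiny-style
model = FocalNetModel(config)                               # randomly initialized
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']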
642
0
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger a__ : Dict = """<<<<<<< This should probably be modified because it mentions: """ a__ : Tuple = """======= >>>>>>> """ a__ : List[str] = [ """TextEncoderConfig""", """ByteTextEncoder""", """SubwordTextEncoder""", """encoder_config""", """maybe_build_from_corpus""", """manual_dir""", ] a__ : List[str] = [ # (pattern, replacement) # Order is important here for some replacements (r"""tfds\.core""", r"""datasets"""), (r"""tf\.io\.gfile\.GFile""", r"""open"""), (r"""tf\.([\w\d]+)""", r"""datasets.Value('\1')"""), (r"""tfds\.features\.Text\(\)""", r"""datasets.Value('string')"""), (r"""tfds\.features\.Text\(""", r"""datasets.Value('string'),"""), (r"""features\s*=\s*tfds.features.FeaturesDict\(""", r"""features=datasets.Features("""), (r"""tfds\.features\.FeaturesDict\(""", r"""dict("""), (r"""The TensorFlow Datasets Authors""", r"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""), (r"""tfds\.""", r"""datasets."""), (r"""dl_manager\.manual_dir""", r"""self.config.data_dir"""), (r"""self\.builder_config""", r"""self.config"""), ] def _lowerCAmelCase ( A__ ): return ConvertCommand(args.tfds_path , args.datasets_directory ) class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' @staticmethod def UpperCAmelCase ( lowerCAmelCase : ArgumentParser) -> Optional[int]: """simple docstring""" lowercase__ = parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=lowerCAmelCase , required=lowerCAmelCase , help='Path to the HuggingFace Datasets folder.') train_parser.set_defaults(func=lowerCAmelCase) def __init__( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : str , *lowerCAmelCase : str) -> Optional[Any]: """simple docstring""" lowercase__ = get_logger('datasets-cli/converting') lowercase__ = tfds_path lowercase__ = datasets_directory def UpperCAmelCase ( self : List[Any]) -> List[str]: """simple docstring""" if os.path.isdir(self._tfds_path): lowercase__ = os.path.abspath(self._tfds_path) elif os.path.isfile(self._tfds_path): lowercase__ = os.path.dirname(self._tfds_path) else: raise ValueError('--tfds_path is neither a directory nor a file. 
Please check path.') lowercase__ = os.path.abspath(self._datasets_directory) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''') lowercase__ = [] lowercase__ = [] lowercase__ = {} if os.path.isdir(self._tfds_path): lowercase__ = os.listdir(lowerCAmelCase) else: lowercase__ = [os.path.basename(self._tfds_path)] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''') lowercase__ = os.path.join(lowerCAmelCase , lowerCAmelCase) lowercase__ = os.path.join(lowerCAmelCase , lowerCAmelCase) if not os.path.isfile(lowerCAmelCase) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file') continue with open(lowerCAmelCase , encoding='utf-8') as f: lowercase__ = f.readlines() lowercase__ = [] lowercase__ = False lowercase__ = False lowercase__ = [] for line in lines: lowercase__ = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: lowercase__ = 'import datasets\n' elif "import tensorflow" in out_line: # order is important here lowercase__ = '' continue elif "from absl import logging" in out_line: lowercase__ = 'from datasets import logging\n' elif "getLogger" in out_line: lowercase__ = out_line.replace('getLogger' , 'get_logger') elif any(expression in out_line for expression in TO_HIGHLIGHT): lowercase__ = True lowercase__ = list(filter(lambda lowerCAmelCase: e in out_line , lowerCAmelCase)) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCAmelCase) + '\n') out_lines.append(lowerCAmelCase) out_lines.append(lowerCAmelCase) continue else: for pattern, replacement in TO_CONVERT: lowercase__ = re.sub(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: lowercase__ = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , lowerCAmelCase) tfds_imports.extend(imp.strip() for imp in match.group(1).split(',')) lowercase__ = 'from . import ' + match.group(1) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''') if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: lowercase__ = True out_lines.append(lowerCAmelCase) if is_builder or "wmt" in f_name: # We create a new directory for each dataset lowercase__ = f_name.replace('.py' , '') lowercase__ = os.path.join(lowerCAmelCase , lowerCAmelCase) lowercase__ = os.path.join(lowerCAmelCase , lowerCAmelCase) os.makedirs(lowerCAmelCase , exist_ok=lowerCAmelCase) self._logger.info(f'''Adding directory {output_dir}''') imports_to_builder_map.update({imp: output_dir for imp in tfds_imports}) else: # Utilities will be moved at the end utils_files.append(lowerCAmelCase) if needs_manual_update: with_manual_update.append(lowerCAmelCase) with open(lowerCAmelCase , 'w' , encoding='utf-8') as f: f.writelines(lowerCAmelCase) self._logger.info(f'''Converted in {output_file}''') for utils_file in utils_files: try: lowercase__ = os.path.basename(lowerCAmelCase) lowercase__ = imports_to_builder_map[f_name.replace('.py' , '')] self._logger.info(f'''Moving {dest_folder} to {utils_file}''') shutil.copy(lowerCAmelCase , lowerCAmelCase) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. 
Please copy manually.''') if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''')
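# Usage sketch for the command registered above; the flags come straight from
# the argument parser in the code.
# datasets-cli convert --tfds_path ./my_tfds_dataset/ --datasets_directory ./hf_datasets/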
706
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a__ : Optional[int] = logging.get_logger(__name__) a__ : Dict = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } a__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } a__ : Any = {"facebook/blenderbot_small-90M": 5_12} def _lowerCAmelCase ( A__ ): lowercase__ = set() lowercase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ = char lowercase__ = set(A__ ) return pairs class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : List[str] = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Tuple = ["input_ids", "attention_mask"] def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]: """simple docstring""" super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase) with open(lowerCAmelCase , encoding='utf-8') as vocab_handle: lowercase__ = json.load(lowerCAmelCase) lowercase__ = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase , encoding='utf-8') as merges_handle: lowercase__ = merges_handle.read().split('\n')[1:-1] lowercase__ = [tuple(merge.split()) for merge in merges] lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase)))) lowercase__ = {} @property def UpperCAmelCase ( self : int) -> int: """simple docstring""" return len(self.encoder) def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder) def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str: """simple docstring""" if token in self.cache: return self.cache[token] lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase) lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase) lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase) if "\n" in token: lowercase__ = token.replace('\n' , ' __newln__') lowercase__ = token.split(' ') lowercase__ = [] for token in tokens: if not len(lowerCAmelCase): continue lowercase__ = token.lower() lowercase__ = tuple(lowerCAmelCase) lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>']) lowercase__ = get_pairs(lowerCAmelCase) if not pairs: words.append(lowerCAmelCase) continue while True: lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf'))) if bigram not in self.bpe_ranks: break lowercase__, lowercase__ = bigram lowercase__ = [] lowercase__ = 0 while i < len(lowerCAmelCase): try: lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase) new_word.extend(word[i:j]) lowercase__ = j except ValueError: new_word.extend(word[i:]) break if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: 
new_word.append(word[i]) i += 1 lowercase__ = tuple(lowerCAmelCase) lowercase__ = new_word if len(lowerCAmelCase) == 1: break else: lowercase__ = get_pairs(lowerCAmelCase) lowercase__ = '@@ '.join(lowerCAmelCase) lowercase__ = word[:-4] lowercase__ = word words.append(lowerCAmelCase) return " ".join(lowerCAmelCase) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' '))) return split_tokens def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int: """simple docstring""" lowercase__ = token.lower() return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token)) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str: """simple docstring""" return self.decoder.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str: """simple docstring""" lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip() return out_string def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCAmelCase): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return lowercase__ = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) lowercase__ = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(lowerCAmelCase , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n') lowercase__ = 0 with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!') lowercase__ = token_index writer.write(' '.join(lowerCAmelCase) + '\n') index += 1 return vocab_file, merge_file
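# Usage sketch, assuming the standard transformers class for this tokenizer:
from transformers import BlenderbotSmallTokenizer

tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
ids = tokenizer("Hello there!")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))  # lowercased pieces, intra-word splits marked with "@@"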
642
0
import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Dict) -> int: """simple docstring""" lowercase__ = tempfile.mkdtemp() lowercase__ = SamImageProcessor() lowercase__ = SamProcessor(__a) processor.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self : List[Any] , **lowerCAmelCase : List[str]) -> int: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__a).image_processor def UpperCAmelCase ( self : Dict) -> Any: """simple docstring""" shutil.rmtree(self.tmpdirname) def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" lowercase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)] lowercase__ = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs] return image_inputs def UpperCAmelCase ( self : Union[str, Any]) -> Tuple: """simple docstring""" lowercase__ = SamProcessor(image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) lowercase__ = self.get_image_processor(do_normalize=__a , padding_value=1.0) lowercase__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __a) def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" lowercase__ = self.get_image_processor() lowercase__ = SamProcessor(image_processor=__a) lowercase__ = self.prepare_image_inputs() lowercase__ = image_processor(__a , return_tensors='np') lowercase__ = processor(images=__a , return_tensors='np') input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes') # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2) @require_torch def UpperCAmelCase ( self : Tuple) -> str: """simple docstring""" lowercase__ = self.get_image_processor() lowercase__ = SamProcessor(image_processor=__a) lowercase__ = [torch.ones((1, 3, 5, 5))] lowercase__ = [[17_64, 26_46]] lowercase__ = [[6_83, 10_24]] lowercase__ = processor.post_process_masks(__a , __a , __a) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46)) lowercase__ = processor.post_process_masks( __a , torch.tensor(__a) , torch.tensor(__a)) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46)) # should also work with np lowercase__ = [np.ones((1, 3, 5, 5))] lowercase__ = processor.post_process_masks(__a , np.array(__a) , np.array(__a)) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46)) lowercase__ = [[1, 0], [0, 1]] with self.assertRaises(__a): lowercase__ = processor.post_process_masks(__a , np.array(__a) , np.array(__a)) @require_vision @require_tf class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : List[Any]) -> List[str]: """simple docstring""" lowercase__ = 
tempfile.mkdtemp() lowercase__ = SamImageProcessor() lowercase__ = SamProcessor(__a) processor.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self : List[Any] , **lowerCAmelCase : str) -> List[Any]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__a).image_processor def UpperCAmelCase ( self : Optional[int]) -> Any: """simple docstring""" shutil.rmtree(self.tmpdirname) def UpperCAmelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" lowercase__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)] lowercase__ = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs] return image_inputs def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" lowercase__ = SamProcessor(image_processor=self.get_image_processor()) processor.save_pretrained(self.tmpdirname) lowercase__ = self.get_image_processor(do_normalize=__a , padding_value=1.0) lowercase__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor , __a) def UpperCAmelCase ( self : List[Any]) -> Optional[int]: """simple docstring""" lowercase__ = self.get_image_processor() lowercase__ = SamProcessor(image_processor=__a) lowercase__ = self.prepare_image_inputs() lowercase__ = image_processor(__a , return_tensors='np') lowercase__ = processor(images=__a , return_tensors='np') input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2) @require_tf def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" lowercase__ = self.get_image_processor() lowercase__ = SamProcessor(image_processor=__a) lowercase__ = [tf.ones((1, 3, 5, 5))] lowercase__ = [[17_64, 26_46]] lowercase__ = [[6_83, 10_24]] lowercase__ = processor.post_process_masks(__a , __a , __a , return_tensors='tf') self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46)) lowercase__ = processor.post_process_masks( __a , tf.convert_to_tensor(__a) , tf.convert_to_tensor(__a) , return_tensors='tf' , ) self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46)) # should also work with np lowercase__ = [np.ones((1, 3, 5, 5))] lowercase__ = processor.post_process_masks( __a , np.array(__a) , np.array(__a) , return_tensors='tf') self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46)) lowercase__ = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError): lowercase__ = processor.post_process_masks( __a , np.array(__a) , np.array(__a) , return_tensors='tf') @require_vision @require_torchvision class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : Any) -> str: """simple docstring""" lowercase__ = tempfile.mkdtemp() lowercase__ = SamImageProcessor() lowercase__ = SamProcessor(__a) processor.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self : Dict , **lowerCAmelCase : Optional[Any]) -> List[str]: """simple docstring""" return AutoProcessor.from_pretrained(self.tmpdirname , **__a).image_processor def UpperCAmelCase ( self : int) -> List[Any]: """simple docstring""" shutil.rmtree(self.tmpdirname) def UpperCAmelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" lowercase__ = 
[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta)] lowercase__ = [Image.fromarray(np.moveaxis(__a , 0 , -1)) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def UpperCAmelCase ( self : Any) -> Any: """simple docstring""" lowercase__ = self.get_image_processor() lowercase__ = SamProcessor(image_processor=__a) lowercase__ = np.random.randint(0 , 2 , size=(1, 3, 5, 5)).astype(np.floataa) lowercase__ = [tf.convert_to_tensor(__a)] lowercase__ = [torch.tensor(__a)] lowercase__ = [[17_64, 26_46]] lowercase__ = [[6_83, 10_24]] lowercase__ = processor.post_process_masks( __a , __a , __a , return_tensors='tf') lowercase__ = processor.post_process_masks( __a , __a , __a , return_tensors='pt') self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy())) @is_pt_tf_cross_test def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" lowercase__ = self.get_image_processor() lowercase__ = SamProcessor(image_processor=__a) lowercase__ = self.prepare_image_inputs() lowercase__ = image_processor(__a , return_tensors='pt')["pixel_values"].numpy() lowercase__ = processor(images=__a , return_tensors='pt')["pixel_values"].numpy() lowercase__ = image_processor(__a , return_tensors='tf')["pixel_values"].numpy() lowercase__ = processor(images=__a , return_tensors='tf')["pixel_values"].numpy() self.assertTrue(np.allclose(__a , __a)) self.assertTrue(np.allclose(__a , __a)) self.assertTrue(np.allclose(__a , __a))
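# A sketch of the post-processing path the tests above exercise; the checkpoint
# name is an assumption (any SAM checkpoint with a compatible processor works).
import torch
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
low_res_masks = [torch.ones((1, 3, 256, 256))]  # low-resolution mask logits
masks = processor.post_process_masks(
    low_res_masks, original_sizes=[[1764, 2646]], reshaped_input_sizes=[[683, 1024]]
)
print(masks[0].shape)  # torch.Size([1, 3, 1764, 2646]) -- resized back to the original image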
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ : Optional[int] = { "configuration_blenderbot": [ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotOnnxConfig", ], "tokenization_blenderbot": ["BlenderbotTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = ["BlenderbotTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForCausalLM", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : List[str] = [ "TFBlenderbotForConditionalGeneration", "TFBlenderbotModel", "TFBlenderbotPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : int = [ "FlaxBlenderbotForConditionalGeneration", "FlaxBlenderbotModel", "FlaxBlenderbotPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys a__ : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
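# Editor's note (hedged): the `_LazyModule` indirection above is the standard
# transformers pattern -- `_import_structure` only names the public symbols, and the
# heavy torch/tf/flax submodules are imported on first attribute access, so e.g.
#
#     from transformers.models.blenderbot import BlenderbotConfig
#
# stays cheap even with no deep-learning backend installed, while the backend-gated
# try/except branches keep `BlenderbotModel` & co. out of the public surface when
# torch is missing.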
def _lowerCAmelCase ( A__ ): lowercase__ = [[0 for _ in range(__UpperCamelCase )] for _ in range(m + 1 )] for i in range(m + 1 ): lowercase__ = 1 for n in range(m + 1 ): for k in range(1 , __UpperCamelCase ): memo[n][k] += memo[n][k - 1] if n - k > 0: memo[n][k] += memo[n - k - 1][k] return memo[m][m - 1] if __name__ == "__main__": import sys if len(sys.argv) == 1: try: a__ : Optional[Any] = int(input("Enter a number: ").strip()) print(partition(n)) except ValueError: print("Please enter a number.") else: try: a__ : Optional[int] = int(sys.argv[1]) print(partition(n)) except ValueError: print("Please pass a number.")
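# Editor's sketch (hedged): a self-contained, de-obfuscated reading of the DP above,
# reconstructed from the flattened snippet -- memo[n][k] counts the partitions of n
# into parts no larger than k + 1, so memo[m][m - 1] is p(m), the total number of
# integer partitions of m.
def _partition_sketch(m: int) -> int:
    memo = [[0] * m for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1  # one way to write any n as 1 + 1 + ... + 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]  # skip parts of size k + 1 entirely
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]  # use at least one part of size k + 1
    return memo[m][m - 1]

# p(5) = 7: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1
assert _partition_sketch(5) == 7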
import heapq import sys import numpy as np a__ : Dict = tuple[int, int] class UpperCAmelCase__: '''simple docstring''' def __init__( self : List[str]) -> Any: """simple docstring""" lowercase__ = [] lowercase__ = set() def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]: """simple docstring""" if not self.empty(): return self.elements[0][0] else: return float('inf') def UpperCAmelCase ( self : int) -> str: """simple docstring""" return len(self.elements) == 0 def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]: """simple docstring""" if item not in self.set: heapq.heappush(self.elements , (priority, item)) self.set.add(lowerCAmelCase) else: # update # print("update", item) lowercase__ = [] ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple: """simple docstring""" if item in self.set: self.set.remove(lowerCAmelCase) lowercase__ = [] ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy)) def UpperCAmelCase ( self : Dict) -> List[Any]: """simple docstring""" return self.elements[0][1] def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) self.set.remove(lowerCAmelCase) return (priority, item) def _lowerCAmelCase ( A__ , A__ ): # euclidean distance lowercase__ = np.array(A__ ) lowercase__ = np.array(A__ ) return np.linalg.norm(a - b ) def _lowerCAmelCase ( A__ , A__ ): # integer division by time variable return consistent_heuristic(A__ , A__ ) // t def _lowerCAmelCase ( A__ , A__ ): # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def _lowerCAmelCase ( A__ , A__ , A__ , A__ ): lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ ) return ans def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = np.chararray((n, n) ) for i in range(A__ ): for j in range(A__ ): lowercase__ = '*' for i in range(A__ ): for j in range(A__ ): if (j, (n - 1) - i) in blocks: lowercase__ = '#' lowercase__ = '-' lowercase__ = back_pointer[goal] while x != start: ((lowercase__), (lowercase__)) = x # print(x) lowercase__ = '-' lowercase__ = back_pointer[x] lowercase__ = '-' for i in range(A__ ): for j in range(A__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=' ' ) print('<-- End position' , end=' ' ) else: print(grid[i][j] , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) print('PATH TAKEN BY THE ALGORITHM IS:-' ) lowercase__ = back_pointer[goal] while x != start: print(A__ , end=' ' ) lowercase__ = back_pointer[x] print(A__ ) sys.exit() def _lowerCAmelCase ( A__ ): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ): for itera in range(A__ ): open_list[itera].remove_element(A__ ) # print("s", s) # print("j", j) ((lowercase__), (lowercase__)) = s lowercase__ = (x - 1, y) lowercase__ = (x + 1, y) lowercase__ = (x, y + 1) lowercase__ = (x, y - 1) for neighbours in [left, right, up, down]: if 
neighbours not in blocks: if valid(A__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(A__ ) lowercase__ = -1 lowercase__ = float('inf' ) if valid(A__ ) and g_function[neighbours] > g_function[s] + 1: lowercase__ = g_function[s] + 1 lowercase__ = s if neighbours not in close_list_anchor: open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) ) if neighbours not in close_list_inad: for var in range(1 , A__ ): if key(A__ , A__ , A__ , A__ ) <= Wa * key( A__ , 0 , A__ , A__ ): open_list[j].put( A__ , key(A__ , A__ , A__ , A__ ) ) def _lowerCAmelCase ( ): lowercase__ = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} a__ : Any = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] a__ : Any = make_common_ground() a__ : Union[str, Any] = blocks_blk # hyper parameters a__ : List[Any] = 1 a__ : List[str] = 1 a__ : Optional[int] = 20 a__ : Optional[Any] = 3 # one consistent and two other inconsistent # start and end destination a__ : Tuple = (0, 0) a__ : str = (n - 1, n - 1) a__ : Optional[Any] = 1 def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = {start: 0, goal: float('inf' )} lowercase__ = {start: -1, goal: -1} lowercase__ = [] lowercase__ = set() for i in range(A__ ): open_list.append(PriorityQueue() ) open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) ) lowercase__ = [] lowercase__ = [] while open_list[0].minkey() < float('inf' ): for i in range(1 , A__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('inf' ): do_something(A__ , A__ , A__ ) else: lowercase__, lowercase__ = open_list[i].top_show() visited.add(A__ ) expand_state( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) close_list_inad.append(A__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('inf' ): do_something(A__ , A__ , A__ ) else: lowercase__ = open_list[0].top_show() visited.add(A__ ) expand_state( A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , ) close_list_anchor.append(A__ ) print('No path found to goal' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(A__ ): if (j, i) in blocks: print('#' , end=' ' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('*' , end=' ' ) else: print('-' , end=' ' ) else: print('*' , end=' ' ) if (j, i) == (n - 1, n - 1): print('<-- End position' , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
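# Editor's note (hedged): the routine above is Multi-Heuristic A* (MHA*) -- one
# "anchor" queue ordered by the consistent heuristic plus n_heuristic - 1 queues
# ordered by the inadmissible heuristics, each expanded only while its best key stays
# within a constant factor of the anchor's. The priority is the usual weighted-A* key;
# illustratively, with the module-level names above:
#
#     g = {start: 0, goal: float("inf")}
#     key(start, 0, goal, g)  # g[start] + Wa * consistent_heuristic(start, goal)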
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def _lowerCAmelCase ( A__=None ): if subparsers is not None: lowercase__ = subparsers.add_parser('env' ) else: lowercase__ = argparse.ArgumentParser('Accelerate env command' ) parser.add_argument( '--config_file' , default=A__ , help='The config file to use for the default values in the launching script.' ) if subparsers is not None: parser.set_defaults(func=A__ ) return parser def _lowerCAmelCase ( A__ ): lowercase__ = torch.__version__ lowercase__ = torch.cuda.is_available() lowercase__ = is_xpu_available() lowercase__ = is_npu_available() lowercase__ = 'Not found' # Get the default from the config file. if args.config_file is not None or os.path.isfile(A__ ): lowercase__ = load_config_from_file(args.config_file ).to_dict() lowercase__ = { '`Accelerate` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Numpy version': np.__version__, 'PyTorch version (GPU?)': F'''{pt_version} ({pt_cuda_available})''', 'PyTorch XPU available': str(A__ ), 'PyTorch NPU available': str(A__ ), 'System RAM': F'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''', } if pt_cuda_available: lowercase__ = torch.cuda.get_device_name() print('\nCopy-and-paste the text below in your GitHub issue\n' ) print('\n'.join([F'''- {prop}: {val}''' for prop, val in info.items()] ) ) print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:' ) lowercase__ = ( '\n'.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(A__ , A__ ) else F'''\t{accelerate_config}''' ) print(A__ ) lowercase__ = accelerate_config return info def _lowerCAmelCase ( ): lowercase__ = env_command_parser() lowercase__ = parser.parse_args() env_command(A__ ) return 0 if __name__ == "__main__": raise SystemExit(main())
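# Editor's note (hedged): wired into the accelerate CLI, the parser above backs the
# `accelerate env` subcommand; typical invocations are
#
#     accelerate env
#     accelerate env --config_file path/to/config.yaml
#
# and the printed block is meant to be pasted verbatim into GitHub issues.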
import math import sys def _lowerCAmelCase ( A__ ): lowercase__ = '' try: with open(A__ , 'rb' ) as binary_file: lowercase__ = binary_file.read() for dat in data: lowercase__ = F'''{dat:08b}''' result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def _lowerCAmelCase ( A__ ): lowercase__ = {'0': '0', '1': '1'} lowercase__, lowercase__ = '', '' lowercase__ = len(A__ ) for i in range(len(A__ ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue lowercase__ = lexicon[curr_string] result += last_match_id lowercase__ = last_match_id + '0' if math.loga(A__ ).is_integer(): lowercase__ = {} for curr_key in list(A__ ): lowercase__ = lexicon.pop(A__ ) lowercase__ = new_lex lowercase__ = last_match_id + '1' index += 1 lowercase__ = '' return result def _lowerCAmelCase ( A__ , A__ ): lowercase__ = 8 try: with open(A__ , 'wb' ) as opened_file: lowercase__ = [ to_write[i : i + byte_length] for i in range(0 , len(A__ ) , A__ ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(A__ , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def _lowerCAmelCase ( A__ ): lowercase__ = 0 for letter in data_bits: if letter == "1": break counter += 1 lowercase__ = data_bits[counter:] lowercase__ = data_bits[counter + 1 :] return data_bits def _lowerCAmelCase ( A__ , A__ ): lowercase__ = read_file_binary(A__ ) lowercase__ = remove_prefix(A__ ) lowercase__ = decompress_data(A__ ) write_file_binary(A__ , A__ ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
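# Editor's note (hedged): end to end, the script above reverses the matching
# Lempel-Ziv compressor -- it reads the compressed file as a bit string, strips the
# leading length prefix, then replays the growing lexicon, re-keying every entry with
# a leading "0" each time the lexicon size crosses a power of two. Illustrative
# invocation on a file produced by the companion compressor (the entry point keeps the
# upstream name `compress` even though it decompresses):
#
#     python lempel_ziv_decompress.py compressed.bin restored.txt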
# Note: if you intend to run this script make sure you look under scripts/fsmt/ # to locate the appropriate script to do the work correctly. There is a set of scripts to: # - download and prepare data and run the conversion script # - perform eval to get the best hparam into the config # - generate model_cards - useful if you have multiple models from the same paper import argparse import json import os import re from collections import OrderedDict from os.path import basename, dirname import fairseq import torch from fairseq import hub_utils from fairseq.data.dictionary import Dictionary from transformers import FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() a__ : str = 2 # based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping` # values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults: # # * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users) # * `early_stopping`: `False` consistently scored better # * `length_penalty` varied, so will assign the best one depending on the model a__ : Dict = { # fairseq: "wmt19-ru-en": {"length_penalty": 1.1}, "wmt19-en-ru": {"length_penalty": 1.1_5}, "wmt19-en-de": {"length_penalty": 1.0}, "wmt19-de-en": {"length_penalty": 1.1}, # allenai: "wmt16-en-de-dist-12-1": {"length_penalty": 0.6}, "wmt16-en-de-dist-6-1": {"length_penalty": 0.6}, "wmt16-en-de-12-1": {"length_penalty": 0.8}, "wmt19-de-en-6-6-base": {"length_penalty": 0.6}, "wmt19-de-en-6-6-big": {"length_penalty": 0.6}, } # this remaps the different models to their organization names a__ : List[str] = {} for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: a__ : Optional[int] = "facebook" for m in [ "wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1", "wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big", ]: a__ : str = "allenai" def _lowerCAmelCase ( A__ ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} lowercase__ = dict((re.sub(r'@@$' , '' , snake_case_ ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , snake_case_ ), v) for k, v in d.items() ) lowercase__ = """<s> <pad> </s> <unk>""".split() # restore the special tokens for k in keep_keys: del da[F'''{k}</w>'''] lowercase__ = d[k] # restore return da def _lowerCAmelCase ( A__ , A__ ): # prep assert os.path.exists(snake_case_ ) os.makedirs(snake_case_ , exist_ok=snake_case_ ) print(F'''Writing results to {pytorch_dump_folder_path}''' ) # handle various types of models lowercase__ = basename(snake_case_ ) lowercase__ = dirname(snake_case_ ) lowercase__ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel lowercase__ = cls.hub_models() lowercase__ = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""} lowercase__ = """.""" # note: since the model dump is old, fairseq has upgraded its model some # time later, and it does a whole lot of rewrites and splits on the saved # weights, therefore we can't use torch.load() directly on the model file. 
# see: upgrade_state_dict(state_dict) in fairseq_model.py print(F'''using checkpoint {checkpoint_file}''' ) lowercase__ = hub_utils.from_pretrained( snake_case_ , snake_case_ , snake_case_ , archive_map=snake_case_ , **snake_case_ ) lowercase__ = vars(chkpt['args']['model'] ) lowercase__ = args["""source_lang"""] lowercase__ = args["""target_lang"""] lowercase__ = dirname(snake_case_ ) lowercase__ = basename(snake_case_ ) # dicts lowercase__ = os.path.join(snake_case_ , F'''dict.{src_lang}.txt''' ) lowercase__ = os.path.join(snake_case_ , F'''dict.{tgt_lang}.txt''' ) lowercase__ = Dictionary.load(snake_case_ ) lowercase__ = rewrite_dict_keys(src_dict.indices ) lowercase__ = len(snake_case_ ) lowercase__ = os.path.join(snake_case_ , 'vocab-src.json' ) print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' ) with open(snake_case_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) ) # detect whether this is a do_lower_case situation, which can be derived by checking whether we # have at least one uppercase letter in the source vocab lowercase__ = True for k in src_vocab.keys(): if not k.islower(): lowercase__ = False break lowercase__ = Dictionary.load(snake_case_ ) lowercase__ = rewrite_dict_keys(tgt_dict.indices ) lowercase__ = len(snake_case_ ) lowercase__ = os.path.join(snake_case_ , 'vocab-tgt.json' ) print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' ) with open(snake_case_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) ) # merges_file (bpecodes) lowercase__ = os.path.join(snake_case_ , VOCAB_FILES_NAMES['merges_file'] ) for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code" lowercase__ = os.path.join(snake_case_ , snake_case_ ) if os.path.exists(snake_case_ ): break with open(snake_case_ , encoding='utf-8' ) as fin: lowercase__ = fin.read() lowercase__ = re.sub(r' \d+$' , '' , snake_case_ , 0 , re.M ) # remove frequency number print(F'''Generating {merges_file}''' ) with open(snake_case_ , 'w' , encoding='utf-8' ) as fout: fout.write(snake_case_ ) # model config lowercase__ = os.path.join(snake_case_ , 'config.json' ) # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe - # may have to modify the tokenizer if a different type is used by a future model assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args['bpe']}''' assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support tokenizer={args['tokenizer']}''' lowercase__ = { """architectures""": ["""FSMTForConditionalGeneration"""], """model_type""": """fsmt""", """activation_dropout""": args["""activation_dropout"""], """activation_function""": """relu""", """attention_dropout""": args["""attention_dropout"""], """d_model""": args["""decoder_embed_dim"""], """dropout""": args["""dropout"""], """init_std""": 0.02, """max_position_embeddings""": args["""max_source_positions"""], """num_hidden_layers""": args["""encoder_layers"""], """src_vocab_size""": src_vocab_size, """tgt_vocab_size""": tgt_vocab_size, """langs""": [src_lang, tgt_lang], """encoder_attention_heads""": args["""encoder_attention_heads"""], """encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""], """encoder_layerdrop""": args["""encoder_layerdrop"""], """encoder_layers""": args["""encoder_layers"""], """decoder_attention_heads""": args["""decoder_attention_heads"""], """decoder_ffn_dim""":
args["""decoder_ffn_embed_dim"""], """decoder_layerdrop""": args["""decoder_layerdrop"""], """decoder_layers""": args["""decoder_layers"""], """bos_token_id""": 0, """pad_token_id""": 1, """eos_token_id""": 2, """is_encoder_decoder""": True, """scale_embedding""": not args["""no_scale_embedding"""], """tie_word_embeddings""": args["""share_all_embeddings"""], } # good hparam defaults to start with lowercase__ = 5 lowercase__ = False if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]: lowercase__ = best_score_hparams[model_dir]["""length_penalty"""] else: lowercase__ = 1.0 print(F'''Generating {fsmt_model_config_file}''' ) with open(snake_case_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) ) # tokenizer config lowercase__ = os.path.join(snake_case_ , snake_case_ ) lowercase__ = { """langs""": [src_lang, tgt_lang], """model_max_length""": 1_024, """do_lower_case""": do_lower_case, } print(F'''Generating {fsmt_tokenizer_config_file}''' ) with open(snake_case_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(snake_case_ , ensure_ascii=snake_case_ , indent=snake_case_ ) ) # model lowercase__ = chkpt["""models"""][0] lowercase__ = model.state_dict() # rename keys to start with 'model.' lowercase__ = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() ) # remove unneeded keys lowercase__ = [ """model.model""", """model.encoder.version""", """model.decoder.version""", """model.encoder_embed_tokens.weight""", """model.decoder_embed_tokens.weight""", """model.encoder.embed_positions._float_tensor""", """model.decoder.embed_positions._float_tensor""", ] for k in ignore_keys: model_state_dict.pop(snake_case_ , snake_case_ ) lowercase__ = FSMTConfig.from_pretrained(snake_case_ ) lowercase__ = FSMTForConditionalGeneration(snake_case_ ) # check that it loads ok model_new.load_state_dict(snake_case_ , strict=snake_case_ ) # save lowercase__ = os.path.join(snake_case_ , snake_case_ ) print(F'''Generating {pytorch_weights_dump_path}''' ) torch.save(snake_case_ , snake_case_ ) print('Conversion is done!' ) print('\nLast step is to upload the files to s3' ) print(F'''cd {data_root}''' ) print(F'''transformers-cli upload {model_dir}''' ) if __name__ == "__main__": a__ : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fsmt_checkpoint_path", default=None, type=str, required=True, help=( "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts," " bpecodes, etc." ), ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) a__ : Optional[Any] = parser.parse_args() convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging a__ : int = logging.get_logger(__name__) a__ : Tuple = {"vocab_file": "vocab.txt"} a__ : int = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } a__ : Dict = { "facebook/esm2_t6_8M_UR50D": 10_24, "facebook/esm2_t12_35M_UR50D": 10_24, } def _lowerCAmelCase ( A__ ): with open(A__ , 'r' ) as f: lowercase__ = f.read().splitlines() return [l.strip() for l in lines] class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Union[str, Any] = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]: """simple docstring""" super().__init__(**lowerCAmelCase) lowercase__ = load_vocab_file(lowerCAmelCase) lowercase__ = dict(enumerate(self.all_tokens)) lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)} lowercase__ = unk_token lowercase__ = cls_token lowercase__ = pad_token lowercase__ = mask_token lowercase__ = eos_token lowercase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str: """simple docstring""" return self._id_to_token.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int: """simple docstring""" return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict: """simple docstring""" return text.split() def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]: """simple docstring""" return len(self._id_to_token) def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" return {token: i for i, token in enumerate(self.all_tokens)} def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int: """simple docstring""" return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token)) def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str: """simple docstring""" return self._id_to_token.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" lowercase__ = [self.cls_token_id] lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!') return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]: """simple docstring""" if 
already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.') return [1 if token in self.all_special_ids else 0 for token in token_ids_a] lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1] if token_ids_a is not None: mask += [0] * len(lowerCAmelCase) + [1] return mask def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict: """simple docstring""" lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt') with open(lowerCAmelCase , 'w') as f: f.write('\n'.join(self.all_tokens)) return (vocab_file,) @property def UpperCAmelCase ( self : Optional[int]) -> int: """simple docstring""" return self.get_vocab_size(with_added_tokens=lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int: """simple docstring""" return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
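# Editor's note (hedged): a minimal usage sketch for the tokenizer above. Note the
# design choice in __init__: every vocabulary token (each residue character) is
# registered as a no-split token in the trie, so raw protein strings are split
# residue by residue before the trivial whitespace `_tokenize` is ever consulted:
#
#     from transformers import EsmTokenizer
#     tok = EsmTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#     tok("MKTAYIAKQR")["input_ids"]  # <cls>, one id per residue, <eos>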
'''simple docstring''' import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class UpperCAmelCase__( SCREAMING_SNAKE_CASE_ ): '''simple docstring''' def __init__( self : Any , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Any=10_24 , lowerCAmelCase : int=10_24 , lowerCAmelCase : Union[str, Any]=3.6) -> int: """simple docstring""" lowercase__ = tokenizer lowercase__ = tokenizer.bos_token_id lowercase__ = dataset lowercase__ = seq_length lowercase__ = seq_length * chars_per_token * num_of_sequences def __iter__( self : str) -> int: """simple docstring""" lowercase__ = iter(self.dataset) lowercase__ = True while more_examples: lowercase__, lowercase__ = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(UpperCamelCase__)['content']) buffer_len += len(buffer[-1]) except StopIteration: lowercase__ = False break lowercase__ = self.tokenizer(UpperCamelCase__ , truncation=UpperCamelCase__)['''input_ids'''] lowercase__ = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id]) for i in range(0 , len(UpperCamelCase__) , self.seq_length): lowercase__ = all_token_ids[i : i + self.seq_length] if len(UpperCamelCase__) == self.seq_length: yield torch.tensor(UpperCamelCase__) def _lowerCAmelCase ( A__ ): lowercase__ = {'''streaming''': True} lowercase__ = load_dataset(args.dataset_name , split='train' , **__UpperCamelCase ) lowercase__ = ConstantLengthDataset(__UpperCamelCase , __UpperCamelCase , seq_length=args.seq_length ) lowercase__ = DataLoader(__UpperCamelCase , batch_size=args.batch_size ) return eval_dataloader def _lowerCAmelCase ( A__ ): model.eval() lowercase__ = [] for step, batch in enumerate(__UpperCamelCase ): with torch.no_grad(): lowercase__ = model(__UpperCamelCase , labels=__UpperCamelCase ) lowercase__ = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(__UpperCamelCase ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break lowercase__ = torch.mean(torch.cat(__UpperCamelCase ) ) try: lowercase__ = torch.exp(__UpperCamelCase ) except OverflowError: lowercase__ = float('inf' ) return loss.item(), perplexity.item() # Setup Accelerator a__ : List[Any] = Accelerator() # Parse configuration a__ : Optional[int] = HfArgumentParser(EvaluationArguments) a__ : Union[str, Any] = parser.parse_args() set_seed(args.seed) # Logging a__ : Dict = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) # Load model and tokenizer a__ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt) a__ : List[str] = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader a__ : Optional[Any] = create_dataloader(args) # Prepare everything with our `accelerator`. a__ : Optional[Any] = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("Evaluating and saving model after training") a__ : Optional[int] = evaluate(args) logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
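# Editor's note (hedged, illustrative names and dataset): ConstantLengthDataset above
# packs a streaming text dataset into fixed-length token blocks -- it buffers roughly
# seq_length * chars_per_token * num_of_sequences characters, tokenizes the whole
# buffer at once, appends the BOS id after each document as a separator, and yields
# every full seq_length slice:
#
#     ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train", streaming=True)
#     packed = ConstantLengthDataset(tokenizer, ds, seq_length=1024)
#     batch = next(iter(DataLoader(packed, batch_size=8)))  # shape (8, 1024)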
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase__( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : List[Any]) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase) }
from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : int = logging.get_logger(__name__) a__ : Optional[int] = { "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json", } class UpperCAmelCase__( lowerCAmelCase__ ): '''simple docstring''' A : Dict = "open-llama" def __init__( self : Optional[int] , lowerCAmelCase : str=10_00_00 , lowerCAmelCase : str=40_96 , lowerCAmelCase : int=1_10_08 , lowerCAmelCase : Any=32 , lowerCAmelCase : List[str]=32 , lowerCAmelCase : str="silu" , lowerCAmelCase : List[Any]=20_48 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : str=1E-6 , lowerCAmelCase : str=True , lowerCAmelCase : Dict=0 , lowerCAmelCase : Optional[int]=1 , lowerCAmelCase : Tuple=2 , lowerCAmelCase : str=False , lowerCAmelCase : Any=True , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=0.1 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : int=None , **lowerCAmelCase : List[Any] , ) -> int: """simple docstring""" lowercase__ = vocab_size lowercase__ = max_position_embeddings lowercase__ = hidden_size lowercase__ = intermediate_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_act lowercase__ = initializer_range lowercase__ = rms_norm_eps lowercase__ = use_cache lowercase__ = kwargs.pop( 'use_memorry_efficient_attention' , _lowerCamelCase) lowercase__ = hidden_dropout_prob lowercase__ = attention_dropout_prob lowercase__ = use_stable_embedding lowercase__ = shared_input_output_embedding lowercase__ = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase , ) def UpperCAmelCase ( self : Dict) -> Any: """simple docstring""" if self.rope_scaling is None: return if not isinstance(self.rope_scaling , _lowerCamelCase) or len(self.rope_scaling) != 2: raise ValueError( '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, ' f'''got {self.rope_scaling}''') lowercase__ = self.rope_scaling.get('type' , _lowerCamelCase) lowercase__ = self.rope_scaling.get('factor' , _lowerCamelCase) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''') if rope_scaling_factor is None or not isinstance(_lowerCamelCase , _lowerCamelCase) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''')
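# Editor's note (hedged): a minimal construction sketch for the config above,
# exercising the `rope_scaling` dict that `_rope_scaling_validation` enforces
# (exactly two keys: a `type` of "linear" or "dynamic", and a float `factor`
# greater than 1):
#
#     config = OpenLlamaConfig(
#         vocab_size=100_000,
#         rope_scaling={"type": "linear", "factor": 2.0},
#     )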
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class UpperCAmelCase__: '''simple docstring''' def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = block_sizes lowercase__ = num_decoder_layers lowercase__ = d_model lowercase__ = n_head lowercase__ = d_head lowercase__ = d_inner lowercase__ = hidden_act lowercase__ = hidden_dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = 2 lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope lowercase__ = initializer_std # Used in the tests to check the size of the first attention layer lowercase__ = n_head # Used in the tests to check the size of the first hidden state lowercase__ = self.d_model # Used in the tests to check the number of output hidden states/attentions lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: lowercase__ = self.num_hidden_layers + 2 def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase__ = ids_tensor([self.batch_size] , self.num_choices) lowercase__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int: """simple docstring""" lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) lowercase__ = [input_ids, input_mask] lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) lowercase__ = False lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) lowercase__ = False lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) lowercase__ = [input_ids, input_mask] lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) lowercase__ = False lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model)) lowercase__ = False lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) 
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str: """simple docstring""" lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]: """simple docstring""" lowercase__ = self.num_choices lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" lowercase__ = 
TFFunnelForQuestionAnswering(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) = config_and_inputs lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : int = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) A : Dict = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) A : Optional[int] = False A : Optional[int] = False def UpperCAmelCase ( self : Tuple) -> str: """simple docstring""" lowercase__ = TFFunnelModelTester(self) lowercase__ = ConfigTester(self , config_class=lowerCAmelCase) def UpperCAmelCase ( self : int) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase) def UpperCAmelCase ( self : int) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase) def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase) @require_tf class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) A : List[str] = False A : int = False def UpperCAmelCase ( self : Any) -> List[Any]: """simple docstring""" lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase) lowercase__ = ConfigTester(self , config_class=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*lowerCAmelCase) def 
UpperCAmelCase ( self : int) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
642
0
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size):
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas):
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas):
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt, neighbours):
    alive = 0
    dead = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
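A headless usage sketch for the Game of Life snippet above (no matplotlib window needed; `create_canvas`, `seed`, and `run` are the restored names, not part of the original row):

board = create_canvas(10)
seed(board)
for _ in range(5):
    board = run(board)  # advance one generation per call
print(sum(cell for row in board for cell in row), "cells alive after 5 generations")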
713
def equated_monthly_installments(principal, rate_per_annum, years_to_repay):
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
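A quick usage sketch for the EMI helper above (function and parameter names follow the restored version; the loan figures are illustrative only):

# Hypothetical loan: 25_000 borrowed at 12% p.a., repaid monthly over 3 years.
emi = equated_monthly_installments(principal=25_000, rate_per_annum=0.12, years_to_repay=3)
print(f"Monthly payment: {emi:.2f}")  # ~830.36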
642
0
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
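A small usage sketch for the disjoint-set class above (`DisjointSet` and `merge` are restored names; `get_parent` comes from the original call sites):

ds = DisjointSet([1, 1, 1])  # three singleton sets of size 1
ds.merge(0, 1)
ds.merge(1, 2)
print(ds.max_set)  # 3 -- all three elements now share one set
print(ds.get_parent(0) == ds.get_parent(2))  # True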
714
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
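A worked example for the CRT helpers above (the argument order n1, r1, n2, r2 follows the restored signatures):

# Solve x ≡ 1 (mod 5) and x ≡ 3 (mod 7); the smallest non-negative solution is 31.
print(chinese_remainder_theorem(5, 1, 7, 3))   # 31
print(chinese_remainder_theorem2(5, 1, 7, 3))  # 31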
642
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a__ = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys a__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
715
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[Any] = { "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Union[str, Any] = "umt5" A : List[str] = ["past_key_values"] def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str: """simple docstring""" super().__init__( is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , ) lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_kv lowercase__ = d_ff lowercase__ = num_layers lowercase__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowercase__ = num_heads lowercase__ = relative_attention_num_buckets lowercase__ = relative_attention_max_distance lowercase__ = dropout_rate lowercase__ = layer_norm_epsilon lowercase__ = initializer_factor lowercase__ = feed_forward_proj lowercase__ = use_cache lowercase__ = self.feed_forward_proj.split('-') lowercase__ = act_info[-1] lowercase__ = act_info[0] == 'gated' if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
' '\'gated-gelu\' or \'relu\'') if feed_forward_proj == "gated-gelu": lowercase__ = 'gelu_new' @property def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" return self.d_model @property def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" return self.num_heads @property def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return self.num_layers class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]: """simple docstring""" lowercase__ = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: lowercase__ = 'past_encoder_sequence + sequence' lowercase__ = {0: 'batch'} lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: lowercase__ = {0: 'batch', 1: 'decoder_sequence'} lowercase__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs') return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def UpperCAmelCase ( self : int) -> int: """simple docstring""" return 13 @property def UpperCAmelCase ( self : Optional[Any]) -> float: """simple docstring""" return 5E-4
642
0
def solution(n: int = 1_000) -> int:
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
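A closed-form cross-check for the loop above via inclusion-exclusion (`sum_of_multiples` is a hypothetical helper, not part of the row):

def sum_of_multiples(k: int, n: int) -> int:
    # Sum of all positive multiples of k strictly below n.
    m = (n - 1) // k
    return k * m * (m + 1) // 2

assert solution(1_000) == (
    sum_of_multiples(3, 1_000) + sum_of_multiples(5, 1_000) - sum_of_multiples(15, 1_000)
)  # both sides give 233168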
716
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : str = XGLMTokenizer A : List[Any] = XGLMTokenizerFast A : int = True A : Optional[Any] = True def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = '<pad>' lowercase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase) def UpperCAmelCase ( self : str) -> List[str]: """simple docstring""" lowercase__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(len(lowerCAmelCase) , 10_08) def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_08) def UpperCAmelCase ( self : Optional[Any]) -> List[str]: """simple docstring""" lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase) lowercase__ = tokenizer.tokenize('This is a test') self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase) self.assertListEqual( lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase) self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" return XGLMTokenizer.from_pretrained('facebook/xglm-564M') def UpperCAmelCase ( self : Optional[int]) -> Dict: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCAmelCase , f.name) lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase) lowercase__ = pickle.dumps(lowerCAmelCase) pickle.loads(lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> str: 
"""simple docstring""" if not self.test_rust_tokenizer: return lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = 'I was born in 92000, and this is falsé.' lowercase__ = tokenizer.tokenize(lowerCAmelCase) lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) @slow def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" lowercase__ = 'Hello World!' lowercase__ = [2, 3_12_27, 44_47, 35] self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase)) @slow def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" lowercase__ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35] # fmt: on self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase)) @slow def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = { 'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
642
0
import inspect import tempfile from collections import OrderedDict, UserDict from collections.abc import MutableMapping from contextlib import ExitStack, contextmanager from dataclasses import fields from enum import Enum from typing import Any, ContextManager, List, Tuple import numpy as np from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy if is_flax_available(): import jax.numpy as jnp class UpperCAmelCase__( UpperCamelCase_ ): '''simple docstring''' def __get__( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str]=None) -> int: """simple docstring""" if obj is None: return self if self.fget is None: raise AttributeError('unreadable attribute') lowercase__ = '__cached_' + self.fget.__name__ lowercase__ = getattr(__a , __a , __a) if cached is None: lowercase__ = self.fget(__a) setattr(__a , __a , __a) return cached def _lowerCAmelCase ( A__ ): lowercase__ = val.lower() if val in {"y", "yes", "t", "true", "on", "1"}: return 1 if val in {"n", "no", "f", "false", "off", "0"}: return 0 raise ValueError(F'''invalid truth value {val!r}''' ) def _lowerCAmelCase ( A__ ): if is_torch_fx_proxy(A__ ): return True if is_torch_available(): import torch if isinstance(A__ , torch.Tensor ): return True if is_tf_available(): import tensorflow as tf if isinstance(A__ , tf.Tensor ): return True if is_flax_available(): import jax.numpy as jnp from jax.core import Tracer if isinstance(A__ , (jnp.ndarray, Tracer) ): return True return isinstance(A__ , np.ndarray ) def _lowerCAmelCase ( A__ ): return isinstance(A__ , np.ndarray ) def _lowerCAmelCase ( A__ ): return _is_numpy(A__ ) def _lowerCAmelCase ( A__ ): import torch return isinstance(A__ , torch.Tensor ) def _lowerCAmelCase ( A__ ): return False if not is_torch_available() else _is_torch(A__ ) def _lowerCAmelCase ( A__ ): import torch return isinstance(A__ , torch.device ) def _lowerCAmelCase ( A__ ): return False if not is_torch_available() else _is_torch_device(A__ ) def _lowerCAmelCase ( A__ ): import torch if isinstance(A__ , A__ ): if hasattr(A__ , A__ ): lowercase__ = getattr(A__ , A__ ) else: return False return isinstance(A__ , torch.dtype ) def _lowerCAmelCase ( A__ ): return False if not is_torch_available() else _is_torch_dtype(A__ ) def _lowerCAmelCase ( A__ ): import tensorflow as tf return isinstance(A__ , tf.Tensor ) def _lowerCAmelCase ( A__ ): return False if not is_tf_available() else _is_tensorflow(A__ ) def _lowerCAmelCase ( A__ ): import tensorflow as tf # the `is_symbolic_tensor` predicate is only available starting with TF 2.14 if hasattr(A__ , 'is_symbolic_tensor' ): return tf.is_symbolic_tensor(A__ ) return type(A__ ) == tf.Tensor def _lowerCAmelCase ( A__ ): return False if not is_tf_available() else _is_tf_symbolic_tensor(A__ ) def _lowerCAmelCase ( A__ ): import jax.numpy as jnp # noqa: F811 return isinstance(A__ , jnp.ndarray ) def _lowerCAmelCase ( A__ ): return False if not is_flax_available() else _is_jax(A__ ) def _lowerCAmelCase ( A__ ): if isinstance(A__ , (dict, UserDict) ): return {k: to_py_obj(A__ ) for k, v in obj.items()} elif isinstance(A__ , (list, tuple) ): return [to_py_obj(A__ ) for o in obj] elif is_tf_tensor(A__ ): return obj.numpy().tolist() elif is_torch_tensor(A__ ): return obj.detach().cpu().tolist() elif is_jax_tensor(A__ ): return np.asarray(A__ ).tolist() elif isinstance(A__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays return obj.tolist() else: return obj def _lowerCAmelCase ( A__ ): if isinstance(A__ 
, (dict, UserDict) ): return {k: to_numpy(A__ ) for k, v in obj.items()} elif isinstance(A__ , (list, tuple) ): return np.array(A__ ) elif is_tf_tensor(A__ ): return obj.numpy() elif is_torch_tensor(A__ ): return obj.detach().cpu().numpy() elif is_jax_tensor(A__ ): return np.asarray(A__ ) else: return obj class UpperCAmelCase__( UpperCamelCase_ ): '''simple docstring''' def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" lowercase__ = fields(self) # Safety and consistency checks if not len(__a): raise ValueError(f'''{self.__class__.__name__} has no fields.''') if not all(field.default is None for field in class_fields[1:]): raise ValueError(f'''{self.__class__.__name__} should not have more than one required field.''') lowercase__ = getattr(self , class_fields[0].name) lowercase__ = all(getattr(self , field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(__a): if isinstance(__a , __a): lowercase__ = first_field.items() lowercase__ = True else: try: lowercase__ = iter(__a) lowercase__ = True except TypeError: lowercase__ = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for idx, element in enumerate(__a): if ( not isinstance(__a , (list, tuple)) or not len(__a) == 2 or not isinstance(element[0] , __a) ): if idx == 0: # If we do not have an iterator of key/values, set it as attribute lowercase__ = first_field else: # If we have a mixed iterator, raise an error raise ValueError( f'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''') break setattr(self , element[0] , element[1]) if element[1] is not None: lowercase__ = element[1] elif first_field is not None: lowercase__ = first_field else: for field in class_fields: lowercase__ = getattr(self , field.name) if v is not None: lowercase__ = v def __delitem__( self : Dict , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Tuple) -> Optional[Any]: """simple docstring""" raise Exception(f'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''') def UpperCAmelCase ( self : Union[str, Any] , *lowerCAmelCase : Tuple , **lowerCAmelCase : List[Any]) -> Optional[Any]: """simple docstring""" raise Exception(f'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''') def UpperCAmelCase ( self : Dict , *lowerCAmelCase : Dict , **lowerCAmelCase : Tuple) -> List[str]: """simple docstring""" raise Exception(f'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''') def UpperCAmelCase ( self : Optional[Any] , *lowerCAmelCase : Dict , **lowerCAmelCase : List[Any]) -> Union[str, Any]: """simple docstring""" raise Exception(f'''You cannot use ``update`` on a {self.__class__.__name__} instance.''') def __getitem__( self : int , lowerCAmelCase : Dict) -> Optional[Any]: """simple docstring""" if isinstance(__a , __a): lowercase__ = dict(self.items()) return inner_dict[k] else: return self.to_tuple()[k] def __setattr__( self : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Dict) -> Optional[Any]: """simple docstring""" if name in self.keys() and value is not None: # Don't call self.__setitem__ to avoid recursion errors super().__setitem__(__a , __a) super().__setattr__(__a , __a) def __setitem__( self : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any]) -> Dict: """simple docstring""" super().__setitem__(__a , __a) # Don't call self.__setattr__ to avoid recursion errors super().__setattr__(__a , __a) def 
UpperCAmelCase ( self : Dict) -> Tuple[Any]: """simple docstring""" return tuple(self[k] for k in self.keys()) class UpperCAmelCase__( UpperCamelCase_ , UpperCamelCase_ ): '''simple docstring''' @classmethod def UpperCAmelCase ( cls : Tuple , lowerCAmelCase : Any) -> List[str]: """simple docstring""" raise ValueError( f'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys())}''') class UpperCAmelCase__( UpperCamelCase_ ): '''simple docstring''' A : List[str] = "longest" A : List[str] = "max_length" A : Tuple = "do_not_pad" class UpperCAmelCase__( UpperCamelCase_ ): '''simple docstring''' A : Optional[int] = "pt" A : Any = "tf" A : Any = "np" A : Dict = "jax" class UpperCAmelCase__: '''simple docstring''' def __init__( self : Dict , lowerCAmelCase : Optional[int]) -> Union[str, Any]: """simple docstring""" lowercase__ = context_managers lowercase__ = ExitStack() def __enter__( self : Optional[Any]) -> Any: """simple docstring""" for context_manager in self.context_managers: self.stack.enter_context(__a) def __exit__( self : Tuple , *lowerCAmelCase : Dict , **lowerCAmelCase : Optional[int]) -> int: """simple docstring""" self.stack.__exit__(*__a , **__a) def _lowerCAmelCase ( A__ ): lowercase__ = infer_framework(A__ ) if framework == "tf": lowercase__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": lowercase__ = inspect.signature(model_class.forward ) # PyTorch models else: lowercase__ = inspect.signature(model_class.__call__ ) # Flax models for p in signature.parameters: if p == "return_loss" and signature.parameters[p].default is True: return True return False def _lowerCAmelCase ( A__ ): lowercase__ = model_class.__name__ lowercase__ = infer_framework(A__ ) if framework == "tf": lowercase__ = inspect.signature(model_class.call ) # TensorFlow models elif framework == "pt": lowercase__ = inspect.signature(model_class.forward ) # PyTorch models else: lowercase__ = inspect.signature(model_class.__call__ ) # Flax models if "QuestionAnswering" in model_name: return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")] else: return [p for p in signature.parameters if "label" in p] def _lowerCAmelCase ( A__ , A__ = "" , A__ = "." ): def _flatten_dict(A__ , A__="" , A__="." 
): for k, v in d.items(): lowercase__ = str(A__ ) + delimiter + str(A__ ) if parent_key else k if v and isinstance(A__ , A__ ): yield from flatten_dict(A__ , A__ , delimiter=A__ ).items() else: yield key, v return dict(_flatten_dict(A__ , A__ , A__ ) ) @contextmanager def _lowerCAmelCase ( A__ , A__ = False ): if use_temp_dir: with tempfile.TemporaryDirectory() as tmp_dir: yield tmp_dir else: yield working_dir def _lowerCAmelCase ( A__ , A__=None ): if is_numpy_array(A__ ): return np.transpose(A__ , axes=A__ ) elif is_torch_tensor(A__ ): return array.T if axes is None else array.permute(*A__ ) elif is_tf_tensor(A__ ): import tensorflow as tf return tf.transpose(A__ , perm=A__ ) elif is_jax_tensor(A__ ): return jnp.transpose(A__ , axes=A__ ) else: raise ValueError(F'''Type not supported for transpose: {type(A__ )}.''' ) def _lowerCAmelCase ( A__ , A__ ): if is_numpy_array(A__ ): return np.reshape(A__ , A__ ) elif is_torch_tensor(A__ ): return array.reshape(*A__ ) elif is_tf_tensor(A__ ): import tensorflow as tf return tf.reshape(A__ , A__ ) elif is_jax_tensor(A__ ): return jnp.reshape(A__ , A__ ) else: raise ValueError(F'''Type not supported for reshape: {type(A__ )}.''' ) def _lowerCAmelCase ( A__ , A__=None ): if is_numpy_array(A__ ): return np.squeeze(A__ , axis=A__ ) elif is_torch_tensor(A__ ): return array.squeeze() if axis is None else array.squeeze(dim=A__ ) elif is_tf_tensor(A__ ): import tensorflow as tf return tf.squeeze(A__ , axis=A__ ) elif is_jax_tensor(A__ ): return jnp.squeeze(A__ , axis=A__ ) else: raise ValueError(F'''Type not supported for squeeze: {type(A__ )}.''' ) def _lowerCAmelCase ( A__ , A__ ): if is_numpy_array(A__ ): return np.expand_dims(A__ , A__ ) elif is_torch_tensor(A__ ): return array.unsqueeze(dim=A__ ) elif is_tf_tensor(A__ ): import tensorflow as tf return tf.expand_dims(A__ , axis=A__ ) elif is_jax_tensor(A__ ): return jnp.expand_dims(A__ , axis=A__ ) else: raise ValueError(F'''Type not supported for expand_dims: {type(A__ )}.''' ) def _lowerCAmelCase ( A__ ): if is_numpy_array(A__ ): return np.size(A__ ) elif is_torch_tensor(A__ ): return array.numel() elif is_tf_tensor(A__ ): import tensorflow as tf return tf.size(A__ ) elif is_jax_tensor(A__ ): return array.size else: raise ValueError(F'''Type not supported for expand_dims: {type(A__ )}.''' ) def _lowerCAmelCase ( A__ , A__ ): for key, value in auto_map.items(): if isinstance(A__ , (tuple, list) ): lowercase__ = [F'''{repo_id}--{v}''' if (v is not None and '--' not in v) else v for v in value] elif value is not None and "--" not in value: lowercase__ = F'''{repo_id}--{value}''' return auto_map def _lowerCAmelCase ( A__ ): for base_class in inspect.getmro(A__ ): lowercase__ = base_class.__module__ lowercase__ = base_class.__name__ if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel": return "tf" elif module.startswith('torch' ) or name == "PreTrainedModel": return "pt" elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel": return "flax" else: raise TypeError(F'''Could not infer framework from class {model_class}.''' )
717
import argparse
import hashlib  # hashlib is only used inside the Test class
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
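A quick self-check for the restored SHA1Hash class above against the standard library (reuses the hashlib import from the snippet):

msg = b"The quick brown fox jumps over the lazy dog"
assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324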
642
0
def solution() -> int:
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1_901
    sundays = 0

    while year < 2_001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2_001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
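A standard-library cross-check for the calendar walk above (counts Sundays falling on the first of a month during 1901-2000):

import datetime

count = sum(
    datetime.date(year, month, 1).weekday() == 6  # Monday is 0, so Sunday is 6
    for year in range(1_901, 2_001)
    for month in range(1, 13)
)
assert count == solution()  # both give 171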
718
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer a__ : List[Any] = logging.get_logger(__name__) a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart a__ : List[Any] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } a__ : int = { "facebook/bart-base": 10_24, "facebook/bart-large": 10_24, "facebook/bart-large-mnli": 10_24, "facebook/bart-large-cnn": 10_24, "facebook/bart-large-xsum": 10_24, "yjernite/bart_eli5": 10_24, } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Optional[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : int = ["input_ids", "attention_mask"] A : Any = BartTokenizer def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" super().__init__( lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase 
, sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , ) lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space: lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type')) lowercase__ = add_prefix_space lowercase__ = pre_tok_class(**lowerCAmelCase) lowercase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase__ = 'post_processor' lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase) if tokenizer_component_instance: lowercase__ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ = tuple(state['sep']) if "cls" in state: lowercase__ = tuple(state['cls']) lowercase__ = False if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space: lowercase__ = add_prefix_space lowercase__ = True if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets: lowercase__ = trim_offsets lowercase__ = True if changes_to_apply: lowercase__ = getattr(lowerCAmelCase , state.pop('type')) lowercase__ = component_class(**lowerCAmelCase) setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase) @property def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]: """simple docstring""" lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value lowercase__ = value def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase) def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase) return tuple(lowerCAmelCase) def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple: """simple docstring""" lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , 
lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
642
0
from __future__ import annotations

from math import pi

# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")


# Run doctest
if __name__ == "__main__":
    import doctest

    doctest.testmod()
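An illustrative call for the helper above (values chosen only for scale; exactly one argument is passed as 0 to solve for it):

# Force between two 4 cm^2 plates held 1 µm apart.
print(casimir_force(force=0, area=4e-4, distance=1e-6))
# {'force': ...} -- roughly 5e-7 N for these inputs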
719
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : str = (DDIMParallelScheduler,) A : Any = (("eta", 0.0), ("num_inference_steps", 50)) def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict: """simple docstring""" lowercase__ = { 'num_train_timesteps': 10_00, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**lowerCAmelCase) return config def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**lowerCAmelCase) lowercase__ = scheduler_class(**lowerCAmelCase) lowercase__, lowercase__ = 10, 0.0 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase) for t in scheduler.timesteps: lowercase__ = model(lowerCAmelCase , lowerCAmelCase) lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample return sample def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowerCAmelCase) lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(steps_offset=1) lowercase__ = scheduler_class(**lowerCAmelCase) scheduler.set_timesteps(5) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1])) def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]): self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> str: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> int: """simple docstring""" for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=lowerCAmelCase) def UpperCAmelCase ( self : Any) -> List[str]: """simple docstring""" for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , ) def UpperCAmelCase ( self : int) -> Optional[Any]: """simple docstring""" for t in [1, 10, 49]: self.check_over_forward(time_step=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> int: """simple docstring""" for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]): self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase) def 
UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]): self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCAmelCase) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5 def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCAmelCase) lowercase__, lowercase__ = 10, 0.0 scheduler.set_timesteps(lowerCAmelCase) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter lowercase__ = self.dummy_sample_deter + 0.1 lowercase__ = self.dummy_sample_deter - 0.1 lowercase__ = samplea.shape[0] lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0) lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase) lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 11_47.79_04) < 1E-2 assert abs(result_mean.item() - 0.49_82) < 1E-3 def UpperCAmelCase ( self : Any) -> int: """simple docstring""" lowercase__ = self.full_loop() lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_72.00_67) < 1E-2 assert abs(result_mean.item() - 0.22_39_67) < 1E-3 def UpperCAmelCase ( self : int) -> List[Any]: """simple docstring""" lowercase__ = self.full_loop(prediction_type='v_prediction') lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 52.53_02) < 1E-2 assert abs(result_mean.item() - 0.06_84) < 1E-3 def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_49.82_95) < 1E-2 assert abs(result_mean.item() - 0.19_51) < 1E-3 def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_49.07_84) < 1E-2 assert abs(result_mean.item() - 0.19_41) < 1E-3
642
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a__ : str = { 'configuration_blip_2': [ 'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Blip2Config', 'Blip2QFormerConfig', 'Blip2VisionConfig', ], 'processing_blip_2': ['Blip2Processor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Dict = [ 'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Blip2Model', 'Blip2QFormerModel', 'Blip2PreTrainedModel', 'Blip2ForConditionalGeneration', 'Blip2VisionModel', ] if TYPE_CHECKING: from .configuration_blip_a import ( BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipaConfig, BlipaQFormerConfig, BlipaVisionConfig, ) from .processing_blip_a import BlipaProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blip_a import ( BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST, BlipaForConditionalGeneration, BlipaModel, BlipaPreTrainedModel, BlipaQFormerModel, BlipaVisionModel, ) else: import sys a__ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
720
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # use the configured k rather than a hard-coded constant
                r = det - self.k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
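A synthetic smoke test for the detector above (assumes the restored cv2-based class; a checkerboard produces strong corner responses at square boundaries):

checkerboard = (np.indices((64, 64)).sum(axis=0) // 8 % 2 * 255).astype(np.uint8)
cv2.imwrite("checkerboard.png", checkerboard)
_, corners = HarrisCorner(0.04, 3).detect("checkerboard.png")
print(len(corners), "corner candidates found")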
642
0
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, None))


if __name__ == "__main__":
    print(f"{solution() = }")
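A quick usage sketch for the lazy prime generator above (reuses the itertools import from the snippet):

print(list(itertools.islice(prime_generator(), 6)))  # [2, 3, 5, 7, 11, 13]
print(solution(6))  # 13, the sixth prime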
721
from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Dict = logging.get_logger(__name__) a__ : List[Any] = { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : int = "speech_to_text" A : Optional[Any] = ["past_key_values"] A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict: """simple docstring""" lowercase__ = vocab_size lowercase__ = d_model lowercase__ = encoder_ffn_dim lowercase__ = encoder_layers lowercase__ = encoder_attention_heads lowercase__ = decoder_ffn_dim lowercase__ = decoder_layers lowercase__ = decoder_attention_heads lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = activation_function lowercase__ = init_std lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = use_cache lowercase__ = encoder_layers lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ = max_source_positions lowercase__ = max_target_positions lowercase__ = num_conv_layers lowercase__ = list(lowerCAmelCase) lowercase__ = conv_channels lowercase__ = input_feat_per_channel lowercase__ = input_channels if len(self.conv_kernel_sizes) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ' f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''') super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
642
0
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : Dict = IFInpaintingSuperResolutionPipeline A : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} A : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"} ) A : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"latents"} def UpperCAmelCase ( self : List[str]) -> List[Any]: """simple docstring""" return self._get_superresolution_dummy_components() def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any]=0) -> str: """simple docstring""" if str(UpperCamelCase__).startswith('mps'): lowercase__ = torch.manual_seed(UpperCamelCase__) else: lowercase__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__) lowercase__ = floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__) lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__) lowercase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__) lowercase__ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA') def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" super().test_save_load_floataa(expected_max_diff=1E-1) def UpperCAmelCase ( self : Optional[Any]) -> Tuple: """simple docstring""" self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def UpperCAmelCase ( self : Dict) -> List[Any]: """simple docstring""" self._test_save_load_local() def UpperCAmelCase ( self : Tuple) -> Optional[int]: """simple docstring""" self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
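# Illustrative sketch (an addition, assuming an installed `transformers`): the
# _LazyModule pattern above replaces the package module in sys.modules, so importing
# the package itself is cheap and heavy submodules are loaded on first attribute access.
import transformers.models.reformer as reformer

config = reformer.ReformerConfig()  # configuration_reformer is imported only now
print(type(config).__name__)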
import inspect
import unittest

from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
    from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image


class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
# Imports
import numpy as np


class IndexCalculation:
    """
    Calculate vegetation indices from red, green, blue, red-edge and NIR bands,
    each given as a numpy matrix of reflectance values.
    """

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        """Dispatch to the index named by `index`, optionally updating the bands first."""
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            "ARVI2": self.arv12,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!")
            return False

    def arv12(self):
        # Atmospherically Resistant Vegetation Index 2
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        # Canopy Chlorophyll Content Index
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        # Chlorophyll Vegetation Index
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        # Green Leaf Index
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        # Normalized Difference Vegetation Index
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        # NDVI computed against the blue band
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        # Green NDVI
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        # Green-Blue NDVI
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        # Green-Red NDVI
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        # Red-Blue NDVI
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        # Pan NDVI
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, x=0.08, a=1.22, b=0.03):
        # Adjusted transformed soil-adjusted vegetation index
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        # Blue-wide dynamic range vegetation index
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        # Chlorophyll Index - Green
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        # Chlorophyll Index - RedEdge
        return (self.nir / self.redEdge) - 1

    def ci(self):
        # Coloration Index
        return (self.red - self.blue) / self.red

    def ctvi(self):
        # Corrected Transformed Vegetation Index
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        # Green Difference Vegetation Index
        return self.nir - self.green

    def evi(self):
        # Enhanced Vegetation Index
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        # Global Environment Monitoring Index
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        # Green Optimized Soil Adjusted Vegetation Index
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        # Green Soil Adjusted Vegetation Index
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def ivi(self, a=None, b=None):
        # Ideal vegetation index
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        # Infrared percentage vegetation index
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        # Intensity
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        # Ratio Vegetation Index
        return self.nir / self.red

    def mrvi(self):
        # Modified RVI
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        # Modified Soil Adjusted Vegetation Index
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        # Normalized green-red difference index
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        # Redness Index
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        # Saturation
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        # Shape index
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        # Simple NIR/RED ratio
        return self.nir / self.red

    def tvi(self):
        # Transformed Vegetation Index
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
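# Usage sketch (illustrative addition): computing NDVI over small random band
# matrices with the class above; values land in [-1, 1] for non-negative bands.
import numpy as np

rng = np.random.default_rng(0)
red = rng.random((4, 4))
nir = rng.random((4, 4))
indices = IndexCalculation(red=red, nir=nir)
print(indices.calculation("NDVI"))  # same result as indices.ndvi()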
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the number of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """
        Merge the sets containing src and dst by rank;
        returns False if they are already joined.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
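# Usage sketch (illustrative addition): three sets of sizes 1, 1 and 2; merging
# the first two produces a set of size 2, which max_set tracks.
ds = DisjointSet([1, 1, 2])
assert ds.merge(0, 1) is True   # set counts become [0, 2, 2]
assert ds.merge(0, 1) is False  # already in the same set
print(ds.max_set)               # 2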
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


torch.backends.cuda.matmul.allow_tf32 = False


@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # Attend-and-excite runs a backward pass at inference time, for which there is
    # no deterministic CUDA operator, so determinism is toggled off for these tests.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    # Attend-and-excite runs a backward pass at inference time, so determinism
    # is toggled off here as well.
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(False)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(True)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : List[Any] = None A : Optional[int] = None @property def UpperCAmelCase ( self : str) -> Union[str, Any]: """simple docstring""" return self.feat_extract_tester.prepare_feat_extract_dict() def UpperCAmelCase ( self : int) -> Any: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(lowerCAmelCase , 'feature_size')) self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate')) self.assertTrue(hasattr(lowerCAmelCase , 'padding_value')) def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name]))) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) @require_torch def UpperCAmelCase ( self : Dict) -> int: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) @require_tf def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]: """simple docstring""" def _inputs_have_equal_length(lowerCAmelCase : int): lowercase__ = len(input[0]) for input_slice in input[1:]: if len(lowerCAmelCase) != length: return False return True def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple): if len(lowerCAmelCase) != len(lowerCAmelCase): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase): if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3): 
return False return True lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = self.feat_extract_tester.seq_length_diff lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff lowercase__ = self.feat_extract_tester.min_seq_length lowercase__ = self.feat_extract_tester.batch_size lowercase__ = self.feat_extract_tester.feature_size # test padding for List[int] + numpy lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest') lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1])) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np') lowercase__ = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np') lowercase__ = input_a[input_name] self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) self.assertTrue(len(input_a[0]) == pad_min_length) self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0]))) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length)) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size) # test padding for `pad_to_multiple_of` for List[int] + numpy lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , ) lowercase__ = input_a[input_name] self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a)) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length)) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size) # Check padding value is correct lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3) self.assertTrue( abs( np.asarray(input_a[1])[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)) < 1E-3) self.assertTrue( abs( 
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)) < 1E-3) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length)) < 1E-3) def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str: """simple docstring""" def _inputs_have_equal_length(lowerCAmelCase : int): lowercase__ = len(input[0]) for input_slice in input[1:]: if len(lowerCAmelCase) != length: return False return True def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]): if len(lowerCAmelCase) != len(lowerCAmelCase): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase): if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3): return False return True lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) # truncate to smallest lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0])) lowercase__ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) # truncate to smallest with np lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np') lowercase__ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(input_a.shape[1] == len(speech_inputs[0])) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) # truncate to middle lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np') lowercase__ = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1])) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1])) # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): 
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy lowercase__ = 12 lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , ) lowercase__ = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of lowercase__ = len(speech_inputs[0]) if expected_length % pad_to_multiple_of != 0: lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0]) == expected_length) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" self._check_padding(numpify=lowerCAmelCase) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" self._check_padding(numpify=lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> int: """simple docstring""" self._check_truncation(numpify=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" self._check_truncation(numpify=lowerCAmelCase) @require_torch def UpperCAmelCase ( self : Dict) -> List[str]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name] self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2) @require_tf def UpperCAmelCase ( self : str) -> str: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name] self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2) def UpperCAmelCase ( self : Optional[Any]) -> Tuple: """simple docstring""" lowercase__ = self.feat_extract_dict lowercase__ = True lowercase__ = self.feature_extraction_class(**lowerCAmelCase) lowercase__ = 
self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = [len(lowerCAmelCase) for x in speech_inputs] lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np') self.assertIn('attention_mask' , lowerCAmelCase) self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2])) self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase) def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" lowercase__ = self.feat_extract_dict lowercase__ = True lowercase__ = self.feature_extraction_class(**lowerCAmelCase) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = [len(lowerCAmelCase) for x in speech_inputs] lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = min(lowerCAmelCase) lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np') self.assertIn('attention_mask' , lowerCAmelCase) self.assertListEqual( list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length]) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase__: '''simple docstring''' def __init__( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any]=13 , lowerCAmelCase : str=7 , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Any=False , lowerCAmelCase : str=False , lowerCAmelCase : int=False , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Any=99 , lowerCAmelCase : Any=0 , lowerCAmelCase : Any=32 , lowerCAmelCase : List[str]=5 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Optional[Any]=5_12 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Dict=0.02 , lowerCAmelCase : Any=2 , lowerCAmelCase : Optional[int]=4 , lowerCAmelCase : Dict="last" , lowerCAmelCase : int=True , lowerCAmelCase : int=None , lowerCAmelCase : Any=0 , ) -> Union[str, Any]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_lengths lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = gelu_activation lowercase__ = sinusoidal_embeddings lowercase__ = causal lowercase__ = asm lowercase__ = n_langs lowercase__ = vocab_size lowercase__ = n_special lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_labels lowercase__ = num_choices lowercase__ = summary_type lowercase__ = use_proj lowercase__ = scope lowercase__ = bos_token_id def UpperCAmelCase ( self : Optional[int]) -> Tuple: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ = None if self.use_input_lengths: lowercase__ = ( ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase__ = ids_tensor([self.batch_size] , 2).float() lowercase__ = ids_tensor([self.batch_size] , self.num_choices) lowercase__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCAmelCase ( self : Optional[int]) 
-> Optional[Any]: """simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def UpperCAmelCase ( self : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , ) -> List[str]: """simple docstring""" lowercase__ = XLMModel(config=lowerCAmelCase) model.to(lowerCAmelCase) model.eval() lowercase__ = model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase) lowercase__ = model(lowerCAmelCase , langs=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , ) -> Dict: """simple docstring""" lowercase__ = XLMWithLMHeadModel(lowerCAmelCase) model.to(lowerCAmelCase) model.eval() lowercase__ = model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : Dict , ) -> int: """simple docstring""" lowercase__ = XLMForQuestionAnsweringSimple(lowerCAmelCase) model.to(lowerCAmelCase) model.eval() lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase) lowercase__ = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , ) -> int: """simple docstring""" lowercase__ = XLMForQuestionAnswering(lowerCAmelCase) model.to(lowerCAmelCase) model.eval() lowercase__ = model(lowerCAmelCase) lowercase__ = model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , ) lowercase__ = model( lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , 
is_impossible=lowerCAmelCase , ) (lowercase__ ) = result_with_labels.to_tuple() lowercase__ = model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase) (lowercase__ ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , ()) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,)) def UpperCAmelCase ( self : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : Any , ) -> Optional[Any]: """simple docstring""" lowercase__ = XLMForSequenceClassification(lowerCAmelCase) model.to(lowerCAmelCase) model.eval() lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase , labels=lowerCAmelCase) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , ) -> List[str]: """simple docstring""" lowercase__ = self.num_labels lowercase__ = XLMForTokenClassification(lowerCAmelCase) model.to(lowerCAmelCase) model.eval() lowercase__ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Any , lowerCAmelCase : Optional[int] , ) -> Optional[int]: """simple docstring""" lowercase__ = self.num_choices lowercase__ = XLMForMultipleChoice(config=lowerCAmelCase) model.to(lowerCAmelCase) model.eval() lowercase__ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase__ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase__ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase__ = model( lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self : str) -> List[str]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( lowercase__ ) = config_and_inputs lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths} return config, inputs_dict @require_torch class UpperCAmelCase__( __A , __A , __A , unittest.TestCase ): '''simple docstring''' A : Optional[int] = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, 
XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) A : List[str] = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable A : Optional[Any] = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase ( self : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int]) -> Any: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast') ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : int=False) -> Any: """simple docstring""" lowercase__ = super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase) lowercase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase) return inputs_dict def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" lowercase__ = XLMModelTester(self) lowercase__ = ConfigTester(self , config_class=lowerCAmelCase , emb_dim=37) def UpperCAmelCase ( self : List[Any]) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase) def UpperCAmelCase ( self : Tuple) -> Optional[int]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> Any: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase) def UpperCAmelCase ( self : Dict) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> Tuple: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase) def UpperCAmelCase ( 
self : int , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=False , lowerCAmelCase : Tuple=1) -> Tuple: """simple docstring""" self.assertIsInstance(lowerCAmelCase , lowerCAmelCase) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase) for iter_attentions in attentions] , [True] * len(lowerCAmelCase)) self.assertEqual(len(lowerCAmelCase) , (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(lowerCAmelCase): # adds PAD dummy token lowercase__ = min_length + idx + 1 lowercase__ = min_length + idx + 1 lowercase__ = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase)) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str]=False , lowerCAmelCase : Tuple=1) -> Optional[Any]: """simple docstring""" self.assertIsInstance(lowerCAmelCase , lowerCAmelCase) self.assertListEqual( [isinstance(lowerCAmelCase , lowerCAmelCase) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase) , ) self.assertEqual(len(lowerCAmelCase) , (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(lowerCAmelCase): # adds PAD dummy token lowercase__ = min_length + idx + 1 lowercase__ = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase) , ) pass @slow def UpperCAmelCase ( self : Tuple) -> Dict: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = XLMModel.from_pretrained(lowerCAmelCase) self.assertIsNotNone(lowerCAmelCase) @require_torch class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" lowercase__ = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048') model.to(lowerCAmelCase) lowercase__ = torch.tensor([[14, 4_47]] , dtype=torch.long , device=lowerCAmelCase) # the president lowercase__ = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase__ = model.generate(lowerCAmelCase , do_sample=lowerCAmelCase) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase)
704
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    # Mobius function mu(n): 0 when n is not square-free, otherwise
    # (-1) ** (number of prime factors). The identifier was mangled in the
    # source; "mobius" is reconstructed from this behaviour.
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
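# --- usage sketch (not in the original file) -------------------------------
# Quick sanity check, assuming the maths helpers behave as their names
# suggest (prime_factors returns the prime factors with multiplicity,
# is_square_free rejects repeated factors):
#   mobius(4)  == 0    (4 = 2 * 2 is not square-free)
#   mobius(10) == 1    (10 = 2 * 5: two factors, even count)
#   mobius(11) == -1   (prime: one factor, odd count)
for n, expected in [(4, 0), (10, 1), (11, -1)]:
    assert mobius(n) == expected, (n, expected)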
642
0
def cocktail_shaker_sort(unsorted):
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(f"{cocktail_shaker_sort(unsorted) = }")
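# --- usage sketch (not in the original file) -------------------------------
# The swapped flag gives an early exit: an already-sorted list is detected
# after one swap-free sweep in each direction.
print(cocktail_shaker_sort([4, 5, 2, 1, 2]))  # [1, 2, 2, 4, 5]
print(cocktail_shaker_sort([1, 2, 3]))        # [1, 2, 3] (breaks after the first pass)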
705
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


# Identifier names below are reconstructed from the assignment targets and
# the transformers naming convention; the source had them mangled.
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
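# --- usage sketch (not in the original file) -------------------------------
# Assuming the reconstructed signature above, the default config derives one
# stage name per entry in `depths`, plus the stem. In practice the class is
# imported as transformers.FocalNetConfig rather than run in-module.
config = FocalNetConfig()
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.hidden_sizes)  # [192, 384, 768, 768]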
642
0
import os


def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum


if __name__ == "__main__":
    print(solution())
706
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a__ : Optional[int] = logging.get_logger(__name__) a__ : Dict = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } a__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } a__ : Any = {"facebook/blenderbot_small-90M": 5_12} def _lowerCAmelCase ( A__ ): lowercase__ = set() lowercase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ = char lowercase__ = set(A__ ) return pairs class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : List[str] = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Tuple = ["input_ids", "attention_mask"] def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]: """simple docstring""" super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase) with open(lowerCAmelCase , encoding='utf-8') as vocab_handle: lowercase__ = json.load(lowerCAmelCase) lowercase__ = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase , encoding='utf-8') as merges_handle: lowercase__ = merges_handle.read().split('\n')[1:-1] lowercase__ = [tuple(merge.split()) for merge in merges] lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase)))) lowercase__ = {} @property def UpperCAmelCase ( self : int) -> int: """simple docstring""" return len(self.encoder) def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder) def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str: """simple docstring""" if token in self.cache: return self.cache[token] lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase) lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase) lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase) if "\n" in token: lowercase__ = token.replace('\n' , ' __newln__') lowercase__ = token.split(' ') lowercase__ = [] for token in tokens: if not len(lowerCAmelCase): continue lowercase__ = token.lower() lowercase__ = tuple(lowerCAmelCase) lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>']) lowercase__ = get_pairs(lowerCAmelCase) if not pairs: words.append(lowerCAmelCase) continue while True: lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf'))) if bigram not in self.bpe_ranks: break lowercase__, lowercase__ = bigram lowercase__ = [] lowercase__ = 0 while i < len(lowerCAmelCase): try: lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase) new_word.extend(word[i:j]) lowercase__ = j except ValueError: new_word.extend(word[i:]) break if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: 
new_word.append(word[i]) i += 1 lowercase__ = tuple(lowerCAmelCase) lowercase__ = new_word if len(lowerCAmelCase) == 1: break else: lowercase__ = get_pairs(lowerCAmelCase) lowercase__ = '@@ '.join(lowerCAmelCase) lowercase__ = word[:-4] lowercase__ = word words.append(lowerCAmelCase) return " ".join(lowerCAmelCase) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' '))) return split_tokens def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int: """simple docstring""" lowercase__ = token.lower() return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token)) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str: """simple docstring""" return self.decoder.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str: """simple docstring""" lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip() return out_string def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCAmelCase): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return lowercase__ = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) lowercase__ = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(lowerCAmelCase , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n') lowercase__ = 0 with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!') lowercase__ = token_index writer.write(' '.join(lowerCAmelCase) + '\n') index += 1 return vocab_file, merge_file
642
0
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num, x_den, y_num, y_den, z_num, z_den):
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
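# --- worked example (not in the original file) -----------------------------
# add_three reduces the sum of three fractions by the gcd of numerator and
# denominator: 1/2 + 1/3 + 5/6 = 60/36, and gcd(60, 36) = 12 gives 5/3.
assert add_three(1, 2, 1, 3, 5, 6) == (5, 3)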
707
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
642
0
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : int = MgpstrTokenizer A : List[str] = False A : Tuple = {} A : Optional[Any] = False def UpperCAmelCase ( self : str) -> Optional[Any]: """simple docstring""" super().setUp() # fmt: off lowercase__ = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # fmt: on lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase)))) lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(lowerCAmelCase) + '\n') def UpperCAmelCase ( self : Optional[Any] , **lowerCAmelCase : Any) -> List[Any]: """simple docstring""" return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : Union[str, Any]) -> List[str]: """simple docstring""" lowercase__ = 'tester' lowercase__ = 'tester' return input_text, output_text @unittest.skip('MGP-STR always lower cases letters.') def UpperCAmelCase ( self : str) -> Any: """simple docstring""" pass def UpperCAmelCase ( self : Tuple) -> Dict: """simple docstring""" lowercase__ = self.get_tokenizers(do_lower_case=lowerCAmelCase) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}'''): lowercase__ = '[SPECIAL_TOKEN]' tokenizer.add_special_tokens({'cls_token': special_token}) lowercase__ = tokenizer.encode([special_token] , add_special_tokens=lowerCAmelCase) self.assertEqual(len(lowerCAmelCase) , 1) lowercase__ = tokenizer.decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase) self.assertTrue(special_token not in decoded) def UpperCAmelCase ( self : Optional[int]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}'''): lowercase__, lowercase__ = self.get_input_output_texts(lowerCAmelCase) lowercase__ = tokenizer.tokenize(lowerCAmelCase) lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase) lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase) self.assertNotEqual(len(lowerCAmelCase) , 0) lowercase__ = tokenizer.decode(lowerCAmelCase) self.assertIsInstance(lowerCAmelCase , lowerCAmelCase) self.assertEqual(text_a.replace(' ' , '') , lowerCAmelCase) @unittest.skip('MGP-STR tokenizer only handles one sequence.') def UpperCAmelCase ( self : Tuple) -> Union[str, Any]: """simple docstring""" pass @unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer') def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" pass
708
import heapq import sys import numpy as np a__ : Dict = tuple[int, int] class UpperCAmelCase__: '''simple docstring''' def __init__( self : List[str]) -> Any: """simple docstring""" lowercase__ = [] lowercase__ = set() def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]: """simple docstring""" if not self.empty(): return self.elements[0][0] else: return float('inf') def UpperCAmelCase ( self : int) -> str: """simple docstring""" return len(self.elements) == 0 def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]: """simple docstring""" if item not in self.set: heapq.heappush(self.elements , (priority, item)) self.set.add(lowerCAmelCase) else: # update # print("update", item) lowercase__ = [] ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple: """simple docstring""" if item in self.set: self.set.remove(lowerCAmelCase) lowercase__ = [] ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy)) def UpperCAmelCase ( self : Dict) -> List[Any]: """simple docstring""" return self.elements[0][1] def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) self.set.remove(lowerCAmelCase) return (priority, item) def _lowerCAmelCase ( A__ , A__ ): # euclidean distance lowercase__ = np.array(A__ ) lowercase__ = np.array(A__ ) return np.linalg.norm(a - b ) def _lowerCAmelCase ( A__ , A__ ): # integer division by time variable return consistent_heuristic(A__ , A__ ) // t def _lowerCAmelCase ( A__ , A__ ): # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def _lowerCAmelCase ( A__ , A__ , A__ , A__ ): lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ ) return ans def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = np.chararray((n, n) ) for i in range(A__ ): for j in range(A__ ): lowercase__ = '*' for i in range(A__ ): for j in range(A__ ): if (j, (n - 1) - i) in blocks: lowercase__ = '#' lowercase__ = '-' lowercase__ = back_pointer[goal] while x != start: ((lowercase__), (lowercase__)) = x # print(x) lowercase__ = '-' lowercase__ = back_pointer[x] lowercase__ = '-' for i in range(A__ ): for j in range(A__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=' ' ) print('<-- End position' , end=' ' ) else: print(grid[i][j] , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) print('PATH TAKEN BY THE ALGORITHM IS:-' ) lowercase__ = back_pointer[goal] while x != start: print(A__ , end=' ' ) lowercase__ = back_pointer[x] print(A__ ) sys.exit() def _lowerCAmelCase ( A__ ): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ): for itera in range(A__ ): open_list[itera].remove_element(A__ ) # print("s", s) # print("j", j) ((lowercase__), (lowercase__)) = s lowercase__ = (x - 1, y) lowercase__ = (x + 1, y) lowercase__ = (x, y + 1) lowercase__ = (x, y - 1) for neighbours in [left, right, up, down]: if 
neighbours not in blocks: if valid(A__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(A__ ) lowercase__ = -1 lowercase__ = float('inf' ) if valid(A__ ) and g_function[neighbours] > g_function[s] + 1: lowercase__ = g_function[s] + 1 lowercase__ = s if neighbours not in close_list_anchor: open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) ) if neighbours not in close_list_inad: for var in range(1 , A__ ): if key(A__ , A__ , A__ , A__ ) <= Wa * key( A__ , 0 , A__ , A__ ): open_list[j].put( A__ , key(A__ , A__ , A__ , A__ ) ) def _lowerCAmelCase ( ): lowercase__ = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} a__ : Any = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] a__ : Any = make_common_ground() a__ : Union[str, Any] = blocks_blk # hyper parameters a__ : List[Any] = 1 a__ : List[str] = 1 a__ : Optional[int] = 20 a__ : Optional[Any] = 3 # one consistent and two other inconsistent # start and end destination a__ : Tuple = (0, 0) a__ : str = (n - 1, n - 1) a__ : Optional[Any] = 1 def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = {start: 0, goal: float('inf' )} lowercase__ = {start: -1, goal: -1} lowercase__ = [] lowercase__ = set() for i in range(A__ ): open_list.append(PriorityQueue() ) open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) ) lowercase__ = [] lowercase__ = [] while open_list[0].minkey() < float('inf' ): for i in range(1 , A__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('inf' ): do_something(A__ , A__ , A__ ) else: lowercase__, lowercase__ = open_list[i].top_show() visited.add(A__ ) expand_state( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) close_list_inad.append(A__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('inf' ): do_something(A__ , A__ , A__ ) else: lowercase__ = open_list[0].top_show() visited.add(A__ ) expand_state( A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , ) close_list_anchor.append(A__ ) print('No path found to goal' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(A__ ): if (j, i) in blocks: print('#' , end=' ' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('*' , end=' ' ) else: print('-' , end=' ' ) else: print('*' , end=' ' ) if (j, i) == (n - 1, n - 1): print('<-- End position' , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
642
0
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
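# --- usage sketch (not in the original file) -------------------------------
# An odd cycle cannot be two-colored, so a triangle fails the check:
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False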
709
import math
import sys

# Function names are taken from their call sites below; the assignment
# targets inside decompress_data are reconstructed from the Lempel-Ziv
# decoding scheme this file implements.


def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""

    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    # Despite the name (kept from the call site below), this pipeline
    # decompresses: read -> strip length prefix -> LZ decode -> write.
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
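# --- worked example (not in the original file) -----------------------------
# remove_prefix strips a header of `counter` zeros followed by a
# (counter + 1)-bit length field: "00101" encodes the 3-bit length 0b101
# behind two zeros, so only the payload "11111" survives.
assert remove_prefix("0010111111") == "11111"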
642
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# Identifier names below are reconstructed from the assignment targets and
# the transformers naming convention; the source had them mangled.
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
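# --- usage sketch (not in the original file) -------------------------------
# Assuming the reconstructed class above: like any PretrainedConfig subclass
# (normally imported as transformers.LiltConfig), keyword overrides show up
# in the serialized dict.
config = LiltConfig(channel_shrink_ratio=2)
assert config.to_dict()["channel_shrink_ratio"] == 2
assert config.pad_token_id == 0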
710
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging a__ : int = logging.get_logger(__name__) a__ : Tuple = {"vocab_file": "vocab.txt"} a__ : int = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } a__ : Dict = { "facebook/esm2_t6_8M_UR50D": 10_24, "facebook/esm2_t12_35M_UR50D": 10_24, } def _lowerCAmelCase ( A__ ): with open(A__ , 'r' ) as f: lowercase__ = f.read().splitlines() return [l.strip() for l in lines] class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Union[str, Any] = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]: """simple docstring""" super().__init__(**lowerCAmelCase) lowercase__ = load_vocab_file(lowerCAmelCase) lowercase__ = dict(enumerate(self.all_tokens)) lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)} lowercase__ = unk_token lowercase__ = cls_token lowercase__ = pad_token lowercase__ = mask_token lowercase__ = eos_token lowercase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str: """simple docstring""" return self._id_to_token.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int: """simple docstring""" return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict: """simple docstring""" return text.split() def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]: """simple docstring""" return len(self._id_to_token) def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" return {token: i for i, token in enumerate(self.all_tokens)} def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int: """simple docstring""" return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token)) def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str: """simple docstring""" return self._id_to_token.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" lowercase__ = [self.cls_token_id] lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!') return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]: """simple docstring""" if 
already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.') return [1 if token in self.all_special_ids else 0 for token in token_ids_a] lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1] if token_ids_a is not None: mask += [0] * len(lowerCAmelCase) + [1] return mask def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict: """simple docstring""" lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt') with open(lowerCAmelCase , 'w') as f: f.write('\n'.join(self.all_tokens)) return (vocab_file,) @property def UpperCAmelCase ( self : Optional[int]) -> int: """simple docstring""" return self.get_vocab_size(with_added_tokens=lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int: """simple docstring""" return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
642
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class UpperCAmelCase__( snake_case__ , unittest.TestCase ): '''simple docstring''' A : Any = ShapEPipeline A : str = ['''prompt'''] A : Tuple = ['''prompt'''] A : Optional[int] = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] A : int = False @property def UpperCAmelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" return 32 @property def UpperCAmelCase ( self : int) -> Tuple: """simple docstring""" return 32 @property def UpperCAmelCase ( self : List[str]) -> Any: """simple docstring""" return self.time_input_dim * 4 @property def UpperCAmelCase ( self : Tuple) -> Optional[int]: """simple docstring""" return 8 @property def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') return tokenizer @property def UpperCAmelCase ( self : Tuple) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0) lowercase__ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) return CLIPTextModelWithProjection(_A) @property def UpperCAmelCase ( self : List[Any]) -> List[str]: """simple docstring""" torch.manual_seed(0) lowercase__ = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } lowercase__ = PriorTransformer(**_A) return model @property def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" torch.manual_seed(0) lowercase__ = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } lowercase__ = ShapERenderer(**_A) return model def UpperCAmelCase ( self : int) -> List[Any]: """simple docstring""" lowercase__ = self.dummy_prior lowercase__ = self.dummy_text_encoder lowercase__ = self.dummy_tokenizer lowercase__ = self.dummy_renderer lowercase__ = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_A , clip_sample=_A , clip_sample_range=1.0 , ) lowercase__ = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str=0) -> Tuple: """simple docstring""" if str(_A).startswith('mps'): 
lowercase__ = torch.manual_seed(_A) else: lowercase__ = torch.Generator(device=_A).manual_seed(_A) lowercase__ = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = 'cpu' lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**_A) lowercase__ = pipe.to(_A) pipe.set_progress_bar_config(disable=_A) lowercase__ = pipe(**self.get_dummy_inputs(_A)) lowercase__ = output.images[0] lowercase__ = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) lowercase__ = np.array( [ 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, 0.00_03_92_16, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def UpperCAmelCase ( self : str) -> Any: """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = torch_device == 'cpu' lowercase__ = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_A , relax_max_difference=_A , ) def UpperCAmelCase ( self : Tuple) -> str: """simple docstring""" lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**_A) lowercase__ = pipe.to(_A) pipe.set_progress_bar_config(disable=_A) lowercase__ = 1 lowercase__ = 2 lowercase__ = self.get_dummy_inputs(_A) for key in inputs.keys(): if key in self.batch_params: lowercase__ = batch_size * [inputs[key]] lowercase__ = pipe(**_A , num_images_per_prompt=_A)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self : str) -> str: """simple docstring""" lowercase__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy') lowercase__ = ShapEPipeline.from_pretrained('openai/shap-e') lowercase__ = pipe.to(_A) pipe.set_progress_bar_config(disable=_A) lowercase__ = torch.Generator(device=_A).manual_seed(0) lowercase__ = pipe( 'a shark' , generator=_A , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(_A , _A)
711
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase__( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : List[Any]) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase) }
642
0
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(num, ans + num, ans)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
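# --- worked example (not in the original file) -----------------------------
# A subsequence need not be contiguous, so the optimum here is the sum of
# every positive element (1 + 4 + 2 + 1 + 4 == 12); a max-subarray solver
# would return 6 for the same input.
assert max_subsequence_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 12
assert max_subsequence_sum([-3, -1, -2]) == -1  # all negative: best single pick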
712
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class UpperCAmelCase__: '''simple docstring''' def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = block_sizes lowercase__ = num_decoder_layers lowercase__ = d_model lowercase__ = n_head lowercase__ = d_head lowercase__ = d_inner lowercase__ = hidden_act lowercase__ = hidden_dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = 2 lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope lowercase__ = initializer_std # Used in the tests to check the size of the first attention layer lowercase__ = n_head # Used in the tests to check the size of the first hidden state lowercase__ = self.d_model # Used in the tests to check the number of output hidden states/attentions lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: lowercase__ = self.num_hidden_layers + 2 def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase__ = ids_tensor([self.batch_size] , self.num_choices) lowercase__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int: """simple docstring""" lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) lowercase__ = [input_ids, input_mask] lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) lowercase__ = False lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) lowercase__ = False lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) lowercase__ = [input_ids, input_mask] lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) lowercase__ = False lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model)) lowercase__ = False lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) 
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str: """simple docstring""" lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]: """simple docstring""" lowercase__ = self.num_choices lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" lowercase__ = 
TFFunnelForQuestionAnswering(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) = config_and_inputs lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : int = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) A : Dict = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) A : Optional[int] = False A : Optional[int] = False def UpperCAmelCase ( self : Tuple) -> str: """simple docstring""" lowercase__ = TFFunnelModelTester(self) lowercase__ = ConfigTester(self , config_class=lowerCAmelCase) def UpperCAmelCase ( self : int) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase) def UpperCAmelCase ( self : int) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase) def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase) @require_tf class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) A : List[str] = False A : int = False def UpperCAmelCase ( self : Any) -> List[Any]: """simple docstring""" lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase) lowercase__ = ConfigTester(self , config_class=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*lowerCAmelCase) def 
UpperCAmelCase ( self : int) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
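# Hedged sketch of the shape-check pattern the tester above repeats for every
# head: run a model on dummy ids and assert the output contract. A stand-in
# Keras embedding is used here instead of TFFunnelModel so the snippet runs
# without the full transformers test config (assumes tensorflow is installed).
import tensorflow as tf

batch_size, seq_length, d_model = 13, 7, 32
dummy_ids = tf.random.uniform((batch_size, seq_length), maxval=99, dtype=tf.int32)
stand_in_model = tf.keras.Sequential([tf.keras.layers.Embedding(99, d_model)])
assert stand_in_model(dummy_ids).shape == (batch_size, seq_length, d_model)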
642
0
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__=True , A__="pt" ): lowercase__ = {"""add_prefix_space""": True} if isinstance(lowerCamelCase_ , lowerCamelCase_ ) and not line.startswith(' ' ) else {} lowercase__ = padding_side return tokenizer( [line] , max_length=lowerCamelCase_ , padding='max_length' if pad_to_max_length else None , truncation=lowerCamelCase_ , return_tensors=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ , **lowerCamelCase_ , ) def _lowerCAmelCase ( A__ , A__ , A__=None , ): lowercase__ = input_ids.ne(lowerCamelCase_ ).any(dim=0 ) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class UpperCAmelCase__( lowercase_ ): '''simple docstring''' def __init__( self : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : List[str]="train" , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : int=None , lowerCAmelCase : Union[str, Any]="" , ) -> Optional[Any]: """simple docstring""" super().__init__() lowercase__ = Path(lowerCamelCase_).joinpath(type_path + '.source') lowercase__ = Path(lowerCamelCase_).joinpath(type_path + '.target') lowercase__ = self.get_char_lens(self.src_file) lowercase__ = max_source_length lowercase__ = max_target_length assert min(self.src_lens) > 0, f'''found empty line in {self.src_file}''' lowercase__ = tokenizer lowercase__ = prefix if n_obs is not None: lowercase__ = self.src_lens[:n_obs] lowercase__ = src_lang lowercase__ = tgt_lang def __len__( self : List[Any]) -> Tuple: """simple docstring""" return len(self.src_lens) def __getitem__( self : Union[str, Any] , lowerCAmelCase : Optional[Any]) -> str: """simple docstring""" lowercase__ = index + 1 # linecache starts at 1 lowercase__ = self.prefix + linecache.getline(str(self.src_file) , lowerCamelCase_).rstrip('\n') lowercase__ = linecache.getline(str(self.tgt_file) , lowerCamelCase_).rstrip('\n') assert source_line, f'''empty source line for index {index}''' assert tgt_line, f'''empty tgt line for index {index}''' # Need to add eos token manually for T5 if isinstance(self.tokenizer , lowerCamelCase_): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right lowercase__ = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , lowerCamelCase_) else self.tokenizer ) lowercase__ = self.tokenizer.generator if isinstance(self.tokenizer , lowerCamelCase_) else self.tokenizer lowercase__ = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_source_length , 'right') lowercase__ = encode_line(lowerCamelCase_ , lowerCamelCase_ , self.max_target_length , 'right') lowercase__ = source_inputs["""input_ids"""].squeeze() lowercase__ = target_inputs["""input_ids"""].squeeze() lowercase__ = source_inputs["""attention_mask"""].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def UpperCAmelCase ( lowerCAmelCase : Dict) -> List[str]: """simple docstring""" return [len(lowerCamelCase_) for x in 
Path(lowerCamelCase_).open().readlines()] def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Any) -> List[Any]: """simple docstring""" lowercase__ = torch.stack([x['input_ids'] for x in batch]) lowercase__ = torch.stack([x['attention_mask'] for x in batch]) lowercase__ = torch.stack([x['decoder_input_ids'] for x in batch]) lowercase__ = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_) else self.tokenizer.pad_token_id ) lowercase__ = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , lowerCamelCase_) else self.tokenizer.pad_token_id ) lowercase__ = trim_batch(lowerCamelCase_ , lowerCamelCase_) lowercase__ = trim_batch(lowerCamelCase_ , lowerCamelCase_ , attention_mask=lowerCamelCase_) lowercase__ = { """input_ids""": source_ids, """attention_mask""": source_mask, """decoder_input_ids""": y, } return batch a__ : str = getLogger(__name__) def _lowerCAmelCase ( A__ ): return list(itertools.chain.from_iterable(lowerCamelCase_ ) ) def _lowerCAmelCase ( A__ ): lowercase__ = get_git_info() save_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , 'git_log.json' ) ) def _lowerCAmelCase ( A__ , A__ , A__=4 , **A__ ): with open(lowerCamelCase_ , 'w' ) as f: json.dump(lowerCamelCase_ , lowerCamelCase_ , indent=lowerCamelCase_ , **lowerCamelCase_ ) def _lowerCAmelCase ( A__ ): with open(lowerCamelCase_ ) as f: return json.load(lowerCamelCase_ ) def _lowerCAmelCase ( ): lowercase__ = git.Repo(search_parent_directories=lowerCamelCase_ ) lowercase__ = { """repo_id""": str(lowerCamelCase_ ), """repo_sha""": str(repo.head.object.hexsha ), """repo_branch""": str(repo.active_branch ), """hostname""": str(socket.gethostname() ), } return repo_infos def _lowerCAmelCase ( A__ , A__ ): return list(map(lowerCamelCase_ , lowerCamelCase_ ) ) def _lowerCAmelCase ( A__ , A__ ): with open(lowerCamelCase_ , 'wb' ) as f: return pickle.dump(lowerCamelCase_ , lowerCamelCase_ ) def _lowerCAmelCase ( A__ ): def remove_articles(A__ ): return re.sub(r'\b(a|an|the)\b' , ' ' , lowerCamelCase_ ) def white_space_fix(A__ ): return " ".join(text.split() ) def remove_punc(A__ ): lowercase__ = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(A__ ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase_ ) ) ) ) def _lowerCAmelCase ( A__ , A__ ): lowercase__ = normalize_answer(lowerCamelCase_ ).split() lowercase__ = normalize_answer(lowerCamelCase_ ).split() lowercase__ = Counter(lowerCamelCase_ ) & Counter(lowerCamelCase_ ) lowercase__ = sum(common.values() ) if num_same == 0: return 0 lowercase__ = 1.0 * num_same / len(lowerCamelCase_ ) lowercase__ = 1.0 * num_same / len(lowerCamelCase_ ) lowercase__ = (2 * precision * recall) / (precision + recall) return fa def _lowerCAmelCase ( A__ , A__ ): return normalize_answer(lowerCamelCase_ ) == normalize_answer(lowerCamelCase_ ) def _lowerCAmelCase ( A__ , A__ ): assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) lowercase__ = 0 for hypo, pred in zip(lowerCamelCase_ , lowerCamelCase_ ): em += exact_match_score(lowerCamelCase_ , lowerCamelCase_ ) if len(lowerCamelCase_ ) > 0: em /= len(lowerCamelCase_ ) return {"em": em} def _lowerCAmelCase ( A__ ): return model_prefix.startswith('rag' ) def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead lowercase__ = """dropout_rate""" for p in extra_params: if getattr(lowerCamelCase_ , lowerCamelCase_ , 
lowerCamelCase_ ): if not hasattr(lowerCamelCase_ , lowerCamelCase_ ) and not hasattr(lowerCamelCase_ , equivalent_param[p] ): logger.info('config doesn\'t have a `{}` attribute'.format(lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) continue lowercase__ = p if hasattr(lowerCamelCase_ , lowerCamelCase_ ) else equivalent_param[p] setattr(lowerCamelCase_ , lowerCamelCase_ , getattr(lowerCamelCase_ , lowerCamelCase_ ) ) delattr(lowerCamelCase_ , lowerCamelCase_ ) return hparams, config
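# Worked example of the pad-trimming helper defined above: columns that are
# padding in every row of the batch are dropped (pad_token_id assumed to be 0).
import torch

batch = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
keep_column_mask = batch.ne(0).any(dim=0)   # tensor([True, True, False, False])
print(batch[:, keep_column_mask].tolist())  # [[5, 6], [7, 0]]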
713
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
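# Worked example for the EMI formula above: 25_000 borrowed at 8% per annum
# over 10 years gives 120 monthly payments of roughly 303.32.
print(round(equated_monthly_installments(25_000, 0.08, 10), 2))  # 303.32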
642
0
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")
        image = dataset["train"][0]["image"].convert("RGB")
        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
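# The expected-slice comparison above uses torch.allclose with an absolute
# tolerance rather than exact equality, since kernels on different devices
# produce tiny float drift. Illustrative check:
import torch

reference = torch.tensor([-0.4158, -0.4092, -0.4347])
observed = reference + 5e-5  # drift well inside atol=1e-4
assert torch.allclose(reference, observed, atol=1e-4)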
714
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) with a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Smallest non-negative x with x % n1 == r1 and x % n2 == r2 (n1, n2 coprime)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, computed via modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
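# Worked example for the functions above: solve x % 3 == 2 and x % 5 == 3.
# extended_euclid(3, 5) returns (2, -1) because 3*2 + 5*(-1) == 1, so the
# combined residue is (3*2*3 + 2*(-1)*5) % 15 == 8, and 8 % 3 == 2, 8 % 5 == 3.
print(chinese_remainder_theorem(3, 2, 5, 3))  # 8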
642
0
import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase__( _UpperCamelCase , unittest.TestCase ): '''simple docstring''' A : Optional[int] = DiTPipeline A : Tuple = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS A : Union[str, Any] = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } A : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS A : Optional[Any] = False def UpperCAmelCase ( self : int) -> List[Any]: """simple docstring""" torch.manual_seed(0) lowercase__ = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCAmelCase , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=lowerCAmelCase , ) lowercase__ = AutoencoderKL() lowercase__ = DDIMScheduler() lowercase__ = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler} return components def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : int=0) -> str: """simple docstring""" if str(lowerCAmelCase).startswith('mps'): lowercase__ = torch.manual_seed(lowerCAmelCase) else: lowercase__ = torch.Generator(device=lowerCAmelCase).manual_seed(lowerCAmelCase) lowercase__ = { "class_labels": [1], "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" lowercase__ = "cpu" lowercase__ = self.get_dummy_components() lowercase__ = self.pipeline_class(**lowerCAmelCase) pipe.to(lowerCAmelCase) pipe.set_progress_bar_config(disable=lowerCAmelCase) lowercase__ = self.get_dummy_inputs(lowerCAmelCase) lowercase__ = pipe(**lowerCAmelCase).images lowercase__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3)) lowercase__ = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57]) lowercase__ = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(lowerCAmelCase , 1E-3) def UpperCAmelCase ( self : str) -> Optional[Any]: """simple docstring""" self._test_inference_batch_single_identical(relax_max_difference=lowerCAmelCase , expected_max_diff=1E-3) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def UpperCAmelCase ( self : Tuple) -> Tuple: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) @require_torch_gpu @slow class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase ( self : str) -> Optional[int]: """simple docstring""" lowercase__ = torch.manual_seed(0) lowercase__ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256') pipe.to('cuda') lowercase__ = ["vase", "umbrella", 
"white shark", "white wolf"] lowercase__ = pipe.get_label_ids(lowerCAmelCase) lowercase__ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=40 , output_type='np').images for word, image in zip(lowerCAmelCase , lowerCAmelCase): lowercase__ = load_numpy( f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''') assert np.abs((expected_image - image).max()) < 1E-2 def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512') lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.to('cuda') lowercase__ = ["vase", "umbrella"] lowercase__ = pipe.get_label_ids(lowerCAmelCase) lowercase__ = torch.manual_seed(0) lowercase__ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type='np').images for word, image in zip(lowerCAmelCase , lowerCAmelCase): lowercase__ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'''/dit/{word}_512.npy''') assert np.abs((expected_image - image).max()) < 1E-1
715
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[Any] = { "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Union[str, Any] = "umt5" A : List[str] = ["past_key_values"] def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str: """simple docstring""" super().__init__( is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , ) lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_kv lowercase__ = d_ff lowercase__ = num_layers lowercase__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowercase__ = num_heads lowercase__ = relative_attention_num_buckets lowercase__ = relative_attention_max_distance lowercase__ = dropout_rate lowercase__ = layer_norm_epsilon lowercase__ = initializer_factor lowercase__ = feed_forward_proj lowercase__ = use_cache lowercase__ = self.feed_forward_proj.split('-') lowercase__ = act_info[-1] lowercase__ = act_info[0] == 'gated' if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
' '\'gated-gelu\' or \'relu\'') if feed_forward_proj == "gated-gelu": lowercase__ = 'gelu_new' @property def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" return self.d_model @property def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" return self.num_heads @property def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return self.num_layers class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]: """simple docstring""" lowercase__ = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: lowercase__ = 'past_encoder_sequence + sequence' lowercase__ = {0: 'batch'} lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: lowercase__ = {0: 'batch', 1: 'decoder_sequence'} lowercase__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs') return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def UpperCAmelCase ( self : int) -> int: """simple docstring""" return 13 @property def UpperCAmelCase ( self : Optional[Any]) -> float: """simple docstring""" return 5E-4
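# Sketch of the gated-activation parsing the constructor above performs:
# "gated-gelu" splits into an is-gated flag plus the base activation, and the
# gelu variant is mapped to "gelu_new". The class and attribute names follow
# the upstream transformers UMT5Config and are an assumption here, since the
# dump obfuscates them.
config = UMT5Config(feed_forward_proj="gated-gelu")
print(config.is_gated_act, config.dense_act_fn)  # True gelu_new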
642
0
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate c0 + c1*x + c2*x**2 + ... by direct summation."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule (one multiply per term)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 5*x**2 + 9.3*x**3 + 7*x**4
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
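# Step-by-step Horner evaluation of the demo polynomial at x = 10:
# 0 -> 7 -> 7*10 + 9.3 = 79.3 -> 79.3*10 + 5 = 798.0 -> 7980.0 -> 79800.0,
# matching the direct sum 5*10**2 + 9.3*10**3 + 7*10**4.
print(horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0))  # 79800.0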
716
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : str = XGLMTokenizer A : List[Any] = XGLMTokenizerFast A : int = True A : Optional[Any] = True def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = '<pad>' lowercase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase) def UpperCAmelCase ( self : str) -> List[str]: """simple docstring""" lowercase__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(len(lowerCAmelCase) , 10_08) def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_08) def UpperCAmelCase ( self : Optional[Any]) -> List[str]: """simple docstring""" lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase) lowercase__ = tokenizer.tokenize('This is a test') self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase) self.assertListEqual( lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase) self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" return XGLMTokenizer.from_pretrained('facebook/xglm-564M') def UpperCAmelCase ( self : Optional[int]) -> Dict: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCAmelCase , f.name) lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase) lowercase__ = pickle.dumps(lowerCAmelCase) pickle.loads(lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> str: 
"""simple docstring""" if not self.test_rust_tokenizer: return lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = 'I was born in 92000, and this is falsé.' lowercase__ = tokenizer.tokenize(lowerCAmelCase) lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) @slow def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" lowercase__ = 'Hello World!' lowercase__ = [2, 3_12_27, 44_47, 35] self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase)) @slow def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" lowercase__ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35] # fmt: on self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase)) @slow def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = { 'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
642
0
import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: a__ : Any = False a__ : int = logging.get_logger(__name__) a__ : List[Any] = "ybelkada/fonts" def _lowerCAmelCase ( ): if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( F'''You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ''' 'Pix2StructImageProcessor. Please upgrade torch.' ) def _lowerCAmelCase ( A__ , A__ , A__ ): requires_backends(snake_case__ , ['torch'] ) _check_torch_version() lowercase__ = image_tensor.unsqueeze(0 ) lowercase__ = torch.nn.functional.unfold(snake_case__ , (patch_height, patch_width) , stride=(patch_height, patch_width) ) lowercase__ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , snake_case__ , snake_case__ , -1 ) lowercase__ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def _lowerCAmelCase ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ): requires_backends(snake_case__ , 'vision' ) # Add new lines so that each line is no more than 80 characters. lowercase__ = textwrap.TextWrapper(width=80 ) lowercase__ = wrapper.wrap(text=snake_case__ ) lowercase__ = '\n'.join(snake_case__ ) if font_bytes is not None and font_path is None: lowercase__ = io.BytesIO(snake_case__ ) elif font_path is not None: lowercase__ = font_path else: lowercase__ = hf_hub_download(snake_case__ , 'Arial.TTF' ) lowercase__ = ImageFont.truetype(snake_case__ , encoding='UTF-8' , size=snake_case__ ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. lowercase__ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , snake_case__ ) ) lowercase__, lowercase__, lowercase__, lowercase__ = temp_draw.textbbox((0, 0) , snake_case__ , snake_case__ ) # Create the actual image with a bit of padding around the text. 
lowercase__ = text_width + left_padding + right_padding lowercase__ = text_height + top_padding + bottom_padding lowercase__ = Image.new('RGB' , (image_width, image_height) , snake_case__ ) lowercase__ = ImageDraw.Draw(snake_case__ ) draw.text(xy=(left_padding, top_padding) , text=snake_case__ , fill=snake_case__ , font=snake_case__ ) return image def _lowerCAmelCase ( A__ , A__ , **A__ ): requires_backends(snake_case__ , 'vision' ) # Convert to PIL image if necessary lowercase__ = to_pil_image(snake_case__ ) lowercase__ = render_text(snake_case__ , **snake_case__ ) lowercase__ = max(header_image.width , image.width ) lowercase__ = int(image.height * (new_width / image.width) ) lowercase__ = int(header_image.height * (new_width / header_image.width) ) lowercase__ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary lowercase__ = to_numpy_array(snake_case__ ) if infer_channel_dimension_format(snake_case__ ) == ChannelDimension.LAST: lowercase__ = to_channel_dimension_format(snake_case__ , ChannelDimension.LAST ) return new_image class UpperCAmelCase__( __lowerCAmelCase ): '''simple docstring''' A : Tuple = ['''flattened_patches'''] def __init__( self : List[Any] , lowerCAmelCase : int = True , lowerCAmelCase : Union[str, Any] = True , lowerCAmelCase : List[Any] = None , lowerCAmelCase : Dict = 20_48 , lowerCAmelCase : int = False , **lowerCAmelCase : Any , ) -> None: """simple docstring""" super().__init__(**lowerCAmelCase_) lowercase__ = patch_size if patch_size is not None else {'height': 16, 'width': 16} lowercase__ = do_normalize lowercase__ = do_convert_rgb lowercase__ = max_patches lowercase__ = is_vqa def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , **lowerCAmelCase : Optional[int]) -> np.ndarray: """simple docstring""" requires_backends(self.extract_flattened_patches , 'torch') _check_torch_version() # convert to torch lowercase__ = to_channel_dimension_format(lowerCAmelCase_ , ChannelDimension.FIRST) lowercase__ = torch.from_numpy(lowerCAmelCase_) lowercase__, lowercase__ = patch_size['height'], patch_size['width'] lowercase__, lowercase__ = get_image_size(lowerCAmelCase_) # maximize scale s.t. 
lowercase__ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) lowercase__ = max(min(math.floor(scale * image_height / patch_height) , lowerCAmelCase_) , 1) lowercase__ = max(min(math.floor(scale * image_width / patch_width) , lowerCAmelCase_) , 1) lowercase__ = max(num_feasible_rows * patch_height , 1) lowercase__ = max(num_feasible_cols * patch_width , 1) lowercase__ = torch.nn.functional.interpolate( image.unsqueeze(0) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=lowerCAmelCase_ , antialias=lowerCAmelCase_ , ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] lowercase__ = torch_extract_patches(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) lowercase__ = patches.shape lowercase__ = patches_shape[1] lowercase__ = patches_shape[2] lowercase__ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] lowercase__ = patches.reshape([rows * columns, depth]) # [rows * columns, 1] lowercase__ = torch.arange(lowerCAmelCase_).reshape([rows, 1]).repeat(1 , lowerCAmelCase_).reshape([rows * columns, 1]) lowercase__ = torch.arange(lowerCAmelCase_).reshape([1, columns]).repeat(lowerCAmelCase_ , 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] lowercase__ = row_ids.to(torch.floataa) lowercase__ = col_ids.to(torch.floataa) # [rows * columns, 2 + patch_height * patch_width * image_channels] lowercase__ = torch.cat([row_ids, col_ids, patches] , -1) # [max_patches, 2 + patch_height * patch_width * image_channels] lowercase__ = torch.nn.functional.pad(lowerCAmelCase_ , [0, 0, 0, max_patches - (rows * columns)]).float() lowercase__ = to_numpy_array(lowerCAmelCase_) return result def UpperCAmelCase ( self : int , lowerCAmelCase : int , lowerCAmelCase : List[Any] = None , **lowerCAmelCase : Tuple) -> np.ndarray: """simple docstring""" if image.dtype == np.uinta: lowercase__ = image.astype(np.floataa) # take mean across the whole `image` lowercase__ = np.mean(lowerCAmelCase_) lowercase__ = np.std(lowerCAmelCase_) lowercase__ = max(lowerCAmelCase_ , 1.0 / math.sqrt(np.prod(image.shape))) return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , **lowerCAmelCase_) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : int = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : str = None , lowerCAmelCase : Optional[Any] = None , lowerCAmelCase : Dict = None , lowerCAmelCase : Dict = ChannelDimension.FIRST , **lowerCAmelCase : Any , ) -> ImageInput: """simple docstring""" lowercase__ = do_normalize if do_normalize is not None else self.do_normalize lowercase__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase__ = patch_size if patch_size is not None else self.patch_size lowercase__ = max_patches if max_patches is not None else self.max_patches lowercase__ = self.is_vqa if kwargs.get('data_format' , lowerCAmelCase_) is not None: raise ValueError('data_format is not an accepted input as the outputs are ') lowercase__ = make_list_of_images(lowerCAmelCase_) if not valid_images(lowerCAmelCase_): raise ValueError( 'Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase__ = [convert_to_rgb(lowerCAmelCase_) for image in images] # All transformations expect numpy arrays. lowercase__ = [to_numpy_array(lowerCAmelCase_) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.') lowercase__ = kwargs.pop('font_bytes' , lowerCAmelCase_) lowercase__ = kwargs.pop('font_path' , lowerCAmelCase_) if isinstance(lowerCAmelCase_ , lowerCAmelCase_): lowercase__ = [header_text] * len(lowerCAmelCase_) lowercase__ = [ render_header(lowerCAmelCase_ , header_text[i] , font_bytes=lowerCAmelCase_ , font_path=lowerCAmelCase_) for i, image in enumerate(lowerCAmelCase_) ] if do_normalize: lowercase__ = [self.normalize(image=lowerCAmelCase_) for image in images] # convert to torch tensor and permute lowercase__ = [ self.extract_flattened_patches(image=lowerCAmelCase_ , max_patches=lowerCAmelCase_ , patch_size=lowerCAmelCase_) for image in images ] # create attention mask in numpy lowercase__ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images] lowercase__ = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=lowerCAmelCase_) return encoded_outputs
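# Numeric sketch of the resize logic in extract_flattened_patches above: the
# scale factor keeps the patch grid within max_patches. For a 480x640 image
# with 16x16 patches and max_patches=2048:
import math

image_height, image_width, patch = 480, 640, 16
max_patches = 2048
scale = math.sqrt(max_patches * (patch / image_height) * (patch / image_width))
rows = max(min(math.floor(scale * image_height / patch), max_patches), 1)
cols = max(min(math.floor(scale * image_width / patch), max_patches), 1)
print(rows, cols, rows * cols)  # 39 52 2028  (2028 <= 2048 patches)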
717
import argparse
import hashlib  # hashlib is only used inside the test function
import struct


class SHA1Hash:
    """Pure-Python SHA-1, byte-for-byte compatible with hashlib.sha1."""

    def __init__(self, data: bytes) -> None:
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n: int, b: int) -> int:
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self) -> bytes:
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        return self.data + padding + struct.pack(">Q", 8 * len(self.data))

    def split_blocks(self) -> list:
        return [self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)]

    def expand_block(self, block: bytes) -> list:
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self) -> str:
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
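# Compatibility check against hashlib using the classic "abc" test vector:
import hashlib

print(SHA1Hash(b"abc").final_hash())     # a9993e364706816aba3e25717850c26c9cd0d89d
print(hashlib.sha1(b"abc").hexdigest())  # identical digest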
642
0
import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def _lowerCAmelCase ( A__ ): # picklable for multiprocessing return x.sum() def _lowerCAmelCase ( A__ ): # picklable for multiprocessing return i + 1 @dataclass class UpperCAmelCase__: '''simple docstring''' A : int A : str class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' def UpperCAmelCase ( self : int) -> Optional[Any]: """simple docstring""" lowercase__ = {} lowercase__ = [] lowercase__ = 1 lowercase__ = [1, 2] lowercase__ = {'''a''': 1, '''b''': 2} lowercase__ = {'''a''': [1, 2], '''b''': [3, 4]} lowercase__ = {'''a''': {'''1''': 1}, '''b''': 2} lowercase__ = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} lowercase__ = {} lowercase__ = [] lowercase__ = 2 lowercase__ = [2, 3] lowercase__ = {'''a''': 2, '''b''': 3} lowercase__ = {'''a''': [2, 3], '''b''': [4, 5]} lowercase__ = {'''a''': {'''1''': 2}, '''b''': 3} lowercase__ = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__) , UpperCAmelCase__) lowercase__ = 2 self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__) lowercase__ = {'''a''': np.eye(2), '''b''': np.zeros(3), '''c''': np.ones(2)} lowercase__ = {'''a''': 2, '''b''': 0, '''c''': 2} lowercase__ = { '''a''': np.eye(2).astype(UpperCAmelCase__), '''b''': np.zeros(3).astype(UpperCAmelCase__), '''c''': np.ones(2).astype(UpperCAmelCase__), } self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(UpperCAmelCase__ , UpperCAmelCase__ 
, map_numpy=UpperCAmelCase__ , num_proc=UpperCAmelCase__) , UpperCAmelCase__) self.assertEqual( {k: v.tolist() for k, v in map_nested(UpperCAmelCase__ , UpperCAmelCase__ , map_numpy=UpperCAmelCase__ , num_proc=UpperCAmelCase__).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(UpperCAmelCase__): # can't pickle a local lambda map_nested(lambda lowerCAmelCase: x + 1 , UpperCAmelCase__ , num_proc=UpperCAmelCase__) def UpperCAmelCase ( self : Any) -> Optional[int]: """simple docstring""" lowercase__ = {'''a''': 1, '''b''': 2} lowercase__ = {'''a''': 3, '''b''': 4} lowercase__ = {'''a''': 5, '''b''': 6} lowercase__ = sorted([('a', (1, 3, 5)), ('b', (2, 4, 6))]) self.assertEqual(sorted(zip_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__)) , UpperCAmelCase__) def UpperCAmelCase ( self : List[str]) -> Dict: """simple docstring""" class UpperCAmelCase__: '''simple docstring''' A : str = 'bar' lowercase__ = Foo() self.assertEqual(foo.my_attr , 'bar') with temporary_assignment(UpperCAmelCase__ , 'my_attr' , 'BAR'): self.assertEqual(foo.my_attr , 'BAR') self.assertEqual(foo.my_attr , 'bar') @pytest.mark.parametrize( 'iterable_length, num_proc, expected_num_proc' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def _lowerCAmelCase ( A__ , A__ , A__ ): with patch('datasets.utils.py_utils._single_map_nested' ) as mock_single_map_nested, patch( 'datasets.parallel.parallel.Pool' ) as mock_multiprocessing_pool: lowercase__ = {F'''{i}''': i for i in range(A__ )} lowercase__ = map_nested(lambda A__ : x + 10 , A__ , num_proc=A__ , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' @require_tf def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" import tensorflow as tf from tensorflow.keras import layers lowercase__ = layers.Dense(2) def gen_random_output(): lowercase__ = tf.random.uniform((1, 3)) return model(UpperCAmelCase__).numpy() with temp_seed(42 , set_tensorflow=UpperCAmelCase__): lowercase__ = gen_random_output() with temp_seed(42 , set_tensorflow=UpperCAmelCase__): lowercase__ = gen_random_output() lowercase__ = gen_random_output() np.testing.assert_equal(UpperCAmelCase__ , UpperCAmelCase__) self.assertGreater(np.abs(outa - outa).sum() , 0) @require_torch def UpperCAmelCase ( self : Optional[int]) -> Tuple: """simple docstring""" import torch def gen_random_output(): lowercase__ = torch.nn.Linear(3 , 2) lowercase__ = torch.rand(1 , 3) return model(UpperCAmelCase__).detach().numpy() with temp_seed(42 , set_pytorch=UpperCAmelCase__): lowercase__ = gen_random_output() with temp_seed(42 , set_pytorch=UpperCAmelCase__): lowercase__ = gen_random_output() lowercase__ = gen_random_output() np.testing.assert_equal(UpperCAmelCase__ , UpperCAmelCase__) self.assertGreater(np.abs(outa - outa).sum() , 0) def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" def gen_random_output(): return np.random.rand(1 , 3) with temp_seed(42): lowercase__ = gen_random_output() with temp_seed(42): lowercase__ = gen_random_output() lowercase__ = gen_random_output() np.testing.assert_equal(UpperCAmelCase__ , UpperCAmelCase__) 
self.assertGreater(np.abs(outa - outa).sum() , 0) @pytest.mark.parametrize('input_data' , [{}] ) def _lowerCAmelCase ( A__ ): lowercase__ = NestedDataStructure(A__ ).data assert output_data == input_data @pytest.mark.parametrize( 'data, expected_output' , [ ({}, []), ([], []), ('foo', ['foo']), (['foo', 'bar'], ['foo', 'bar']), ([['foo', 'bar']], ['foo', 'bar']), ([[['foo'], ['bar']]], ['foo', 'bar']), ([[['foo'], 'bar']], ['foo', 'bar']), ({'a': 1, 'b': 2}, [1, 2]), ({'a': [1, 2], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[1, 2]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[[3], [4]]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [[3, 4]]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, 4]}, [1, 2, 3, 4]), ({'a': [[[1], [2]]], 'b': [3, [4]]}, [1, 2, 3, 4]), ({'a': {'1': 1}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': 2}, [1, 2]), ({'a': {'1': [1]}, 'b': [2]}, [1, 2]), ] , ) def _lowerCAmelCase ( A__ , A__ ): lowercase__ = NestedDataStructure(A__ ).flatten() assert output == expected_output def _lowerCAmelCase ( ): lowercase__ = A(x=1 , y='foobar' ) lowercase__ = {'''x''': 1, '''y''': '''foobar'''} assert asdict(A__ ) == expected_output lowercase__ = {'''a''': {'''b''': A(x=10 , y='foo' )}, '''c''': [A(x=20 , y='bar' )]} lowercase__ = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]} assert asdict(A__ ) == expected_output with pytest.raises(A__ ): asdict([1, A(x=10 , y='foo' )] ) def _lowerCAmelCase ( A__ ): return text.split() def _lowerCAmelCase ( A__ ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def _lowerCAmelCase ( ): with Pool(2 ) as pool: lowercase__ = list(iflatmap_unordered(A__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) ) assert out.count('hello' ) == 10 assert out.count('there' ) == 10 assert len(A__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: lowercase__ = list(iflatmap_unordered(A__ , _split_text , kwargs_iterable=[{'text': 'hello there'}] * 10 ) ) assert out.count('hello' ) == 10 assert out.count('there' ) == 10 assert len(A__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: lowercase__ = [] for yield_time, content in iflatmap_unordered( A__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'content': 'a'}, {'content': 'b'}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(A__ ) assert out.count('a' ) == 2 assert out.count('b' ) == 2 assert len(A__ ) == 4
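# Distilled view of the contract the map_nested tests above assert: the
# function is applied at every leaf while the container structure is kept.
# This is a simplified re-implementation, not datasets' actual helper.
def map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(map_nested_sketch(fn, v) for v in data)
    return fn(data)

print(map_nested_sketch(lambda x: x + 1, {"a": [1, 2], "b": 3}))  # {'a': [2, 3], 'b': 4}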
718
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer a__ : List[Any] = logging.get_logger(__name__) a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart a__ : List[Any] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } a__ : int = { "facebook/bart-base": 10_24, "facebook/bart-large": 10_24, "facebook/bart-large-mnli": 10_24, "facebook/bart-large-cnn": 10_24, "facebook/bart-large-xsum": 10_24, "yjernite/bart_eli5": 10_24, } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Optional[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : int = ["input_ids", "attention_mask"] A : Any = BartTokenizer def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" super().__init__( lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase 
, sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , ) lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space: lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type')) lowercase__ = add_prefix_space lowercase__ = pre_tok_class(**lowerCAmelCase) lowercase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase__ = 'post_processor' lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase) if tokenizer_component_instance: lowercase__ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ = tuple(state['sep']) if "cls" in state: lowercase__ = tuple(state['cls']) lowercase__ = False if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space: lowercase__ = add_prefix_space lowercase__ = True if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets: lowercase__ = trim_offsets lowercase__ = True if changes_to_apply: lowercase__ = getattr(lowerCAmelCase , state.pop('type')) lowercase__ = component_class(**lowerCAmelCase) setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase) @property def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]: """simple docstring""" lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value lowercase__ = value def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase) def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase) return tuple(lowerCAmelCase) def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple: """simple docstring""" lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , 
lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
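A quick usage sketch for the fast BART tokenizer defined above, under its public name `BartTokenizerFast` (downloading the checkpoint requires network access):

from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
enc = tok("Hello world", return_tensors="pt")
print(enc["input_ids"].shape)           # (1, sequence_length), wrapped in <s> ... </s>
print(tok.decode(enc["input_ids"][0]))  # "<s>Hello world</s>"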
642
0
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class LevitConfig(PretrainedConfig): model_type = "levit" def __init__( self , image_size=2_24 , num_channels=3 , kernel_size=3 , stride=2 , padding=1 , patch_size=16 , hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , mlp_ratio=[2, 2, 2] , attention_ratio=[2, 2, 2] , initializer_range=0.02 , **kwargs , ): super().__init__(**kwargs) self.image_size = image_size self.num_channels = num_channels self.kernel_size = kernel_size self.stride = stride self.padding = padding self.hidden_sizes = hidden_sizes self.num_attention_heads = num_attention_heads self.depths = depths self.key_dim = key_dim self.drop_path_rate = drop_path_rate self.patch_size = patch_size self.attention_ratio = attention_ratio self.mlp_ratio = mlp_ratio self.initializer_range = initializer_range self.down_ops = [ ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class LevitOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ]) @property def atol_for_validation(self) -> float: return 1E-4
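A minimal sketch of constructing the config above under its public transformers name and checking a few defaults:

from transformers import LevitConfig

config = LevitConfig()
print(config.model_type)           # "levit"
print(config.hidden_sizes)         # [128, 256, 384]
print(config.num_attention_heads)  # [4, 8, 12]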
719
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : str = (DDIMParallelScheduler,) A : Any = (("eta", 0.0), ("num_inference_steps", 50)) def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict: """simple docstring""" lowercase__ = { 'num_train_timesteps': 10_00, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**lowerCAmelCase) return config def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**lowerCAmelCase) lowercase__ = scheduler_class(**lowerCAmelCase) lowercase__, lowercase__ = 10, 0.0 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase) for t in scheduler.timesteps: lowercase__ = model(lowerCAmelCase , lowerCAmelCase) lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample return sample def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowerCAmelCase) lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(steps_offset=1) lowercase__ = scheduler_class(**lowerCAmelCase) scheduler.set_timesteps(5) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1])) def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]): self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> str: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> int: """simple docstring""" for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=lowerCAmelCase) def UpperCAmelCase ( self : Any) -> List[str]: """simple docstring""" for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , ) def UpperCAmelCase ( self : int) -> Optional[Any]: """simple docstring""" for t in [1, 10, 49]: self.check_over_forward(time_step=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> int: """simple docstring""" for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]): self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase) def 
UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]): self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCAmelCase) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5 def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCAmelCase) lowercase__, lowercase__ = 10, 0.0 scheduler.set_timesteps(lowerCAmelCase) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter lowercase__ = self.dummy_sample_deter + 0.1 lowercase__ = self.dummy_sample_deter - 0.1 lowercase__ = samplea.shape[0] lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0) lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase) lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 11_47.79_04) < 1E-2 assert abs(result_mean.item() - 0.49_82) < 1E-3 def UpperCAmelCase ( self : Any) -> int: """simple docstring""" lowercase__ = self.full_loop() lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_72.00_67) < 1E-2 assert abs(result_mean.item() - 0.22_39_67) < 1E-3 def UpperCAmelCase ( self : int) -> List[Any]: """simple docstring""" lowercase__ = self.full_loop(prediction_type='v_prediction') lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 52.53_02) < 1E-2 assert abs(result_mean.item() - 0.06_84) < 1E-3 def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_49.82_95) < 1E-2 assert abs(result_mean.item() - 0.19_51) < 1E-3 def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_49.07_84) < 1E-2 assert abs(result_mean.item() - 0.19_41) < 1E-3
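Stripped of the test harness, the `full_loop` helper above is the standard DDIM sampling loop; a sketch with a toy stand-in for the denoising model:

import torch
from diffusers import DDIMParallelScheduler

scheduler = DDIMParallelScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
model = lambda x, t: torch.zeros_like(x)  # toy denoiser in place of a real UNet
for t in scheduler.timesteps:
    residual = model(sample, t)
    sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample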
642
0
from bisect import bisect from itertools import accumulate def frac_knapsack(vl, wt, w, n): """Greedy fractional knapsack: fill capacity w from n items with values vl and weights wt.""" r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True) vl, wt = [i[0] for i in r], [i[1] for i in r] acc = list(accumulate(wt)) k = bisect(acc, w) return ( 0 if k == 0 else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k]) ) if __name__ == "__main__": import doctest doctest.testmod()
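For example, with values [60, 100, 120], weights [10, 20, 30] and capacity 50, the greedy fill takes the first two items whole and two thirds of the third, for 60 + 100 + 80 = 240:

assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0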
720
import cva import numpy as np class UpperCAmelCase__: '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase : float , lowerCAmelCase : int) -> Dict: """simple docstring""" if k in (0.04, 0.06): lowercase__ = k lowercase__ = window_size else: raise ValueError('invalid k value') def __str__( self : Tuple) -> str: """simple docstring""" return str(self.k) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : str) -> tuple[cva.Mat, list[list[int]]]: """simple docstring""" lowercase__ = cva.imread(lowerCAmelCase , 0) lowercase__, lowercase__ = img.shape lowercase__ = [] lowercase__ = img.copy() lowercase__ = cva.cvtColor(lowerCAmelCase , cva.COLOR_GRAY2RGB) lowercase__, lowercase__ = np.gradient(lowerCAmelCase) lowercase__ = dx**2 lowercase__ = dy**2 lowercase__ = dx * dy lowercase__ = 0.04 lowercase__ = self.window_size // 2 for y in range(lowerCAmelCase , h - offset): for x in range(lowerCAmelCase , w - offset): lowercase__ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase__ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase__ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowercase__ = (wxx * wyy) - (wxy**2) lowercase__ = wxx + wyy lowercase__ = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r]) color_img.itemset((y, x, 0) , 0) color_img.itemset((y, x, 1) , 0) color_img.itemset((y, x, 2) , 2_55) return color_img, corner_list if __name__ == "__main__": a__ : Dict = HarrisCorner(0.0_4, 3) a__ , a__ : Dict = edge_detect.detect("path_to_image") cva.imwrite("detect.png", color_img)
642
0
import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() a__ : Optional[Any] = logging.get_logger(__name__) def _lowerCAmelCase ( A__ ): lowercase__ = torch.load(A__ , map_location='cpu' ) if "model" in sd.keys(): lowercase__ = torch.load(A__ , map_location='cpu' )["model"] # pop unnecessary weights lowercase__ = [ "decoder.version", "decoder.output_projection.weight", ] for key in keys_to_delete: if key in sd: sd.pop(A__ ) lowercase__ = { "decoder.project_in_dim.weight": "decoder.project_in.weight", "decoder.project_out_dim.weight": "decoder.project_out.weight", "decoder.layer_norm.weight": "decoder.final_layer_norm.weight", "decoder.layer_norm.bias": "decoder.final_layer_norm.bias", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: lowercase__ = sd.pop(A__ ) lowercase__ = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: lowercase__ = sd[key] # We split QKV in separate Q,K,V lowercase__ = key.replace('.qkv_proj.' , '.q_proj.' ) lowercase__ = key.replace('.qkv_proj.' , '.k_proj.' ) lowercase__ = key.replace('.qkv_proj.' , '.v_proj.' ) lowercase__ = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 lowercase__ = torch.split(A__ , depth // 3 , dim=0 ) lowercase__ = q lowercase__ = k lowercase__ = v del sd[key] return sd @torch.no_grad() def _lowerCAmelCase ( A__ , A__ , A__=None ): lowercase__ = load_checkpoint(A__ ) if config is not None: lowercase__ = OPTConfig.from_pretrained(A__ ) else: lowercase__ = OPTConfig() lowercase__ = OPTModel(A__ ).half().eval() model.load_state_dict(A__ ) # Check results Path(A__ ).mkdir(exist_ok=A__ ) model.save_pretrained(A__ ) if __name__ == "__main__": a__ : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") a__ : Optional[Any] = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
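The core transformation above is splitting each fused `qkv_proj` weight into equal Q, K and V chunks along dim 0; in isolation the step looks like this sketch (shapes are illustrative):

import torch

qkv = torch.randn(3 * 4, 8)  # fused projection weight; per-projection dim is 4
q, k, v = torch.split(qkv, qkv.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (4, 8)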
721
from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Dict = logging.get_logger(__name__) a__ : List[Any] = { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json" ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : int = "speech_to_text" A : Optional[Any] = ["past_key_values"] A : Optional[int] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Optional[int] , lowerCAmelCase : Tuple=1_00_00 , lowerCAmelCase : int=12 , lowerCAmelCase : int=20_48 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : str=6 , lowerCAmelCase : Dict=20_48 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Optional[int]=0.0 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Dict="relu" , lowerCAmelCase : Tuple=2_56 , lowerCAmelCase : Dict=0.1 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=0.0 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : List[Any]=2 , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=1 , lowerCAmelCase : List[str]=0 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : Any=60_00 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Optional[Any]=(5, 5) , lowerCAmelCase : Union[str, Any]=10_24 , lowerCAmelCase : List[Any]=80 , lowerCAmelCase : List[str]=1 , **lowerCAmelCase : List[str] , ) -> Dict: """simple docstring""" lowercase__ = vocab_size lowercase__ = d_model lowercase__ = encoder_ffn_dim lowercase__ = encoder_layers lowercase__ = encoder_attention_heads lowercase__ = decoder_ffn_dim lowercase__ = decoder_layers lowercase__ = decoder_attention_heads lowercase__ = dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = activation_function lowercase__ = init_std lowercase__ = encoder_layerdrop lowercase__ = decoder_layerdrop lowercase__ = use_cache lowercase__ = encoder_layers lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True lowercase__ = max_source_positions lowercase__ = max_target_positions lowercase__ = num_conv_layers lowercase__ = list(lowerCAmelCase) lowercase__ = conv_channels lowercase__ = input_feat_per_channel lowercase__ = input_channels if len(self.conv_kernel_sizes) != self.num_conv_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ' f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, ''' f'''`config.num_conv_layers = {self.num_conv_layers}`.''') super().__init__( pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , is_encoder_decoder=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
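A sketch of building the config above under its public name, including the consistency check it enforces between `conv_kernel_sizes` and `num_conv_layers`:

from transformers import Speech2TextConfig

config = Speech2TextConfig(num_conv_layers=2, conv_kernel_sizes=(5, 5))
print(config.model_type)  # "speech_to_text"
# A mismatch raises the ValueError defined above:
# Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))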
642
0
import torch def main(): if torch.cuda.is_available(): num_gpus = torch.cuda.device_count() else: num_gpus = 0 print(F'''Successfully ran on {num_gpus} GPUs''' ) if __name__ == "__main__": main()
700
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) a__ : Any = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Optional[int] = ["ReformerTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Union[str, Any] = ["ReformerTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ : Any = [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", "ReformerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys a__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
642
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a__ : int = "▁" a__ : Dict = {"vocab_file": "spiece.model"} a__ : int = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"} } a__ : List[str] = { "google/pegasus-xsum": 5_12, } a__ : Any = logging.get_logger(__name__) class UpperCAmelCase__( __UpperCAmelCase ): '''simple docstring''' A : List[Any] = VOCAB_FILES_NAMES A : List[str] = VOCAB_FILES_NAMES A : int = PRETRAINED_VOCAB_FILES_MAP A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : str="<pad>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Tuple="<unk>" , lowerCAmelCase : List[Any]="<mask_2>" , lowerCAmelCase : Union[str, Any]="<mask_1>" , lowerCAmelCase : List[Any]=None , lowerCAmelCase : int=1_03 , lowerCAmelCase : Optional[Dict[str, Any]] = None , **lowerCAmelCase : List[str] , ) -> Optional[int]: """simple docstring""" lowercase__ = offset if additional_special_tokens is not None: if not isinstance(_lowerCamelCase , _lowerCamelCase): raise TypeError( f'''additional_special_tokens should be of type {type(_lowerCamelCase)}, but is''' f''' {type(_lowerCamelCase)}''') lowercase__ = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(_lowerCamelCase) , self.offset - 1) ] if len(set(_lowerCamelCase)) != len(_lowerCamelCase): raise ValueError( 'Please make sure that the provided additional_special_tokens do not contain an incorrectly' f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''') lowercase__ = additional_special_tokens_extended else: lowercase__ = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset)] lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , mask_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token_sent=_lowerCamelCase , offset=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , ) lowercase__ = mask_token_sent lowercase__ = vocab_file lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(_lowerCamelCase) # add special tokens to encoder dict lowercase__ = { 0: self.pad_token, 1: self.eos_token, } if self.mask_token_sent is not None: self.encoder.update( { 2: self.mask_token_sent, 3: self.mask_token, }) if self.offset > 0: # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102 # mask_token_sent is already added to list -> so start at 1 self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1)}) lowercase__ = {v: k for k, v in self.encoder.items()} @property def UpperCAmelCase ( self : Tuple) -> Tuple: """simple docstring""" return len(self.sp_model) + self.offset def UpperCAmelCase ( self : Optional[Any]) -> Dict: """simple docstring""" lowercase__ = {self.convert_ids_to_tokens(_lowerCamelCase): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Tuple) -> str: """simple docstring""" lowercase__ = self.__dict__.copy() lowercase__ = None return state def __setstate__( self : str , lowerCAmelCase : List[str]) -> List[str]: """simple docstring""" lowercase__ = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs'): lowercase__ = {} lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> Union[str, Any]: """simple docstring""" return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str) -> Tuple: """simple docstring""" if token in self.decoder: return self.decoder[token] elif token in self.added_tokens_decoder: return self.added_tokens_decoder[token] lowercase__ = self.sp_model.piece_to_id(_lowerCamelCase) return sp_id + self.offset def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Dict: """simple docstring""" if index in self.encoder: return self.encoder[index] elif index in self.added_tokens_encoder: return self.added_tokens_encoder[index] else: lowercase__ = self.sp_model.IdToPiece(index - self.offset) return token def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[Any]) -> Any: """simple docstring""" lowercase__ = [] lowercase__ = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_lowerCamelCase) + token lowercase__ = [] else: current_sub_tokens.append(_lowerCamelCase) out_string += self.sp_model.decode(_lowerCamelCase) return out_string.strip() def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any]=False) -> int: """simple docstring""" return 1 def UpperCAmelCase ( self : Any , lowerCAmelCase : Tuple) -> str: """simple docstring""" lowercase__ 
= set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special return [1 if x in all_special_ids else 0 for x in seq] def UpperCAmelCase ( self : int , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> Any: """simple docstring""" if already_has_special_tokens: return self._special_token_mask(_lowerCamelCase) elif token_ids_a is None: return self._special_token_mask(_lowerCamelCase) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple=None) -> Optional[Any]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> List[Any]: """simple docstring""" if not os.path.isdir(_lowerCamelCase): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return lowercase__ = os.path.join( _lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(_lowerCamelCase) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _lowerCamelCase) elif not os.path.isfile(self.vocab_file): with open(_lowerCamelCase , 'wb') as fi: lowercase__ = self.sp_model.serialized_model_proto() fi.write(_lowerCamelCase) return (out_vocab_file,)
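A usage sketch for the tokenizer above under its public name `PegasusTokenizer` (requires the sentencepiece model from the hub):

from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
ids = tok("A short document to summarize.")["input_ids"]
print(ids[-1] == tok.eos_token_id)  # True: build_inputs_with_special_tokens appends </s>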
701
# Imports import numpy as np class UpperCAmelCase__: '''simple docstring''' def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict: """simple docstring""" self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int: """simple docstring""" if red is not None: lowercase__ = red if green is not None: lowercase__ = green if blue is not None: lowercase__ = blue if red_edge is not None: lowercase__ = red_edge if nir is not None: lowercase__ = nir return True def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]: """simple docstring""" self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase) lowercase__ = { 'ARVI2': self.arvaa, 'CCCI': self.ccci, 'CVI': self.cvi, 'GLI': self.gli, 'NDVI': self.ndvi, 'BNDVI': self.bndvi, 'redEdgeNDVI': self.red_edge_ndvi, 'GNDVI': self.gndvi, 'GBNDVI': self.gbndvi, 'GRNDVI': self.grndvi, 'RBNDVI': self.rbndvi, 'PNDVI': self.pndvi, 'ATSAVI': self.atsavi, 'BWDRVI': self.bwdrvi, 'CIgreen': self.ci_green, 'CIrededge': self.ci_rededge, 'CI': self.ci, 'CTVI': self.ctvi, 'GDVI': self.gdvi, 'EVI': self.evi, 'GEMI': self.gemi, 'GOSAVI': self.gosavi, 'GSAVI': self.gsavi, 'Hue': self.hue, 'IVI': self.ivi, 'IPVI': self.ipvi, 'I': self.i, 'RVI': self.rvi, 'MRVI': self.mrvi, 'MSAVI': self.m_savi, 'NormG': self.norm_g, 'NormNIR': self.norm_nir, 'NormR': self.norm_r, 'NGRDI': self.ngrdi, 'RI': self.ri, 'S': self.s, 'IF': self._if, 'DVI': self.dvi, 'TVI': self.tvi, 'NDRE': self.ndre, } try: return funcs[index]() except KeyError: print('Index not in the list!') return False def UpperCAmelCase ( self : Optional[int]) -> List[str]: """simple docstring""" return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def UpperCAmelCase ( self : int) -> Any: """simple docstring""" return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def UpperCAmelCase ( self : str) -> Optional[int]: """simple docstring""" return self.nir * (self.red / (self.green**2)) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return (self.nir - self.red) / (self.nir + self.red) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" return (self.nir - self.blue) / (self.nir + self.blue) def UpperCAmelCase ( self : List[Any]) -> Optional[int]: """simple docstring""" return (self.redEdge - self.red) / (self.redEdge + self.red) def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green) def UpperCAmelCase ( self : Dict) -> int: """simple docstring""" return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def UpperCAmelCase ( self : Union[str, 
Any]) -> Optional[int]: """simple docstring""" return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def UpperCAmelCase ( self : Optional[Any]) -> Dict: """simple docstring""" return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]: """simple docstring""" return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def UpperCAmelCase ( self : int) -> Tuple: """simple docstring""" return (self.nir / self.green) - 1 def UpperCAmelCase ( self : Any) -> str: """simple docstring""" return (self.nir / self.redEdge) - 1 def UpperCAmelCase ( self : Any) -> List[str]: """simple docstring""" return (self.red - self.blue) / self.red def UpperCAmelCase ( self : Any) -> Optional[int]: """simple docstring""" lowercase__ = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2)) def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" return self.nir - self.green def UpperCAmelCase ( self : Tuple) -> List[Any]: """simple docstring""" return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red) def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green + y) def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]: """simple docstring""" return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def UpperCAmelCase ( self : str) -> int: """simple docstring""" return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple: """simple docstring""" return (self.nir - b) / (a * self.red) def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return (self.red + self.green + self.blue) / 30.5 def UpperCAmelCase ( self : int) -> str: """simple docstring""" return self.nir / self.red def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return (self.rvi() - 1) / (self.rvi() + 1) def UpperCAmelCase ( self : Optional[int]) -> Optional[int]: """simple docstring""" return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return self.green / (self.nir + self.red + self.green) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" return self.nir / (self.nir + self.red + self.green) def UpperCAmelCase ( self : List[Any]) -> Dict: """simple docstring""" 
return self.red / (self.nir + self.red + self.green) def UpperCAmelCase ( self : Optional[Any]) -> Any: """simple docstring""" return (self.green - self.red) / (self.green + self.red) def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" return (self.red - self.green) / (self.red + self.green) def UpperCAmelCase ( self : str) -> int: """simple docstring""" lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)]) lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) return (max_value - min_value) / max_value def UpperCAmelCase ( self : Optional[int]) -> Tuple: """simple docstring""" return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def UpperCAmelCase ( self : int) -> Optional[Any]: """simple docstring""" return self.nir / self.red def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" return (self.ndvi() + 0.5) ** (1 / 2) def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" return (self.nir - self.redEdge) / (self.nir + self.redEdge)
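Under the upstream method names (the obfuscation above collapses them all to `UpperCAmelCase`), each index is a small array expression; for instance NDVI, the 'NDVI' entry in the dispatch table, computed directly:

import numpy as np

nir = np.array([[0.8, 0.7], [0.6, 0.9]])
red = np.array([[0.1, 0.2], [0.3, 0.1]])
ndvi = (nir - red) / (nir + red)  # values near 1 indicate dense vegetation
print(ndvi)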
642
0
'''simple docstring''' import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase__: '''simple docstring''' def __init__( self : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : int=True , lowerCAmelCase : Dict=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Optional[int]=False , lowerCAmelCase : int=False , lowerCAmelCase : Tuple=2 , lowerCAmelCase : Any=99 , lowerCAmelCase : List[Any]=0 , lowerCAmelCase : Any=32 , lowerCAmelCase : Tuple=5 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : List[str]=5_12 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : Dict=2 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : int="last" , lowerCAmelCase : Tuple=True , lowerCAmelCase : str=None , lowerCAmelCase : List[str]=0 , ) -> Dict: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_lengths lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = gelu_activation lowercase__ = sinusoidal_embeddings lowercase__ = causal lowercase__ = asm lowercase__ = n_langs lowercase__ = vocab_size lowercase__ = n_special lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = max_position_embeddings lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = num_labels lowercase__ = num_choices lowercase__ = summary_type lowercase__ = use_proj lowercase__ = scope lowercase__ = bos_token_id def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ = None if self.use_input_lengths: lowercase__ = ( ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase__ = ids_tensor([self.batch_size] , 2).float() lowercase__ = ids_tensor([self.batch_size] , self.num_choices) lowercase__ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) 
def UpperCAmelCase ( self : List[str]) -> Optional[Any]: """simple docstring""" return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def UpperCAmelCase ( self : Tuple , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : int , ) -> Optional[int]: """simple docstring""" lowercase__ = XLMModel(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() lowercase__ = model(__lowerCamelCase , lengths=__lowerCamelCase , langs=__lowerCamelCase) lowercase__ = model(__lowerCamelCase , langs=__lowerCamelCase) lowercase__ = model(__lowerCamelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def UpperCAmelCase ( self : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : Tuple , ) -> int: """simple docstring""" lowercase__ = XLMWithLMHeadModel(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() lowercase__ = model(__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" lowercase__ = XLMForQuestionAnsweringSimple(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() lowercase__ = model(__lowerCamelCase) lowercase__ = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase) lowercase__ = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , ) -> int: """simple docstring""" lowercase__ = XLMForQuestionAnswering(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() lowercase__ = model(__lowerCamelCase) lowercase__ = model( __lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , p_mask=__lowerCamelCase , ) lowercase__ = model( __lowerCamelCase , 
start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , cls_index=__lowerCamelCase , is_impossible=__lowerCamelCase , ) (lowercase__ ) = result_with_labels.to_tuple() lowercase__ = model(__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase) (lowercase__ ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , ()) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top)) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,)) def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , ) -> str: """simple docstring""" lowercase__ = XLMForSequenceClassification(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() lowercase__ = model(__lowerCamelCase) lowercase__ = model(__lowerCamelCase , labels=__lowerCamelCase) self.parent.assertEqual(result.loss.shape , ()) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , ) -> str: """simple docstring""" lowercase__ = self.num_labels lowercase__ = XLMForTokenClassification(__lowerCamelCase) model.to(__lowerCamelCase) model.eval() lowercase__ = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Dict , ) -> Any: """simple docstring""" lowercase__ = self.num_choices lowercase__ = XLMForMultipleChoice(config=__lowerCamelCase) model.to(__lowerCamelCase) model.eval() lowercase__ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase__ = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase__ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() lowercase__ = model( __lowerCamelCase , attention_mask=__lowerCamelCase , token_type_ids=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self : Any) -> Tuple: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( lowercase__ ) = config_and_inputs lowercase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths} return config, inputs_dict @require_torch class UpperCAmelCase__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 
unittest.TestCase ): '''simple docstring''' A : int = ( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) A : Union[str, Any] = ( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable A : List[str] = ( { "feature-extraction": XLMModel, "fill-mask": XLMWithLMHeadModel, "question-answering": XLMForQuestionAnsweringSimple, "text-classification": XLMForSequenceClassification, "text-generation": XLMWithLMHeadModel, "token-classification": XLMForTokenClassification, "zero-shot": XLMForSequenceClassification, } if is_torch_available() else {} ) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : str , lowerCAmelCase : Tuple) -> Optional[Any]: """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast') ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any]=False) -> List[Any]: """simple docstring""" lowercase__ = super()._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowercase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase) lowercase__ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__lowerCamelCase) return inputs_dict def UpperCAmelCase ( self : List[str]) -> Any: """simple docstring""" lowercase__ = XLMModelTester(self) lowercase__ = ConfigTester(self , config_class=__lowerCamelCase , emb_dim=37) def UpperCAmelCase ( self : str) -> Union[str, Any]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Any) -> Any: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*__lowerCamelCase) def UpperCAmelCase ( self : Optional[int]) -> List[str]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*__lowerCamelCase) def UpperCAmelCase ( self : str) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*__lowerCamelCase) def UpperCAmelCase ( self : str) -> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*__lowerCamelCase) def UpperCAmelCase ( self : Optional[Any]) -> Any: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*__lowerCamelCase) def UpperCAmelCase ( self : List[Any]) -> Tuple: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*__lowerCamelCase) def UpperCAmelCase ( self : List[str]) -> int: """simple docstring""" lowercase__ = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*__lowerCamelCase) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : str , lowerCAmelCase : int=False , lowerCAmelCase : Optional[Any]=1) -> Dict: """simple docstring""" self.assertIsInstance(__lowerCamelCase , __lowerCamelCase) self.assertListEqual( [isinstance(__lowerCamelCase , __lowerCamelCase) for iter_attentions in attentions] , [True] * len(__lowerCamelCase)) self.assertEqual(len(__lowerCamelCase) , (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(__lowerCamelCase): # adds PAD dummy token lowercase__ = min_length + idx + 1 lowercase__ = min_length + idx + 1 lowercase__ = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(__lowerCamelCase)) def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Dict=1) -> Union[str, Any]: """simple docstring""" self.assertIsInstance(__lowerCamelCase , __lowerCamelCase) self.assertListEqual( [isinstance(__lowerCamelCase , __lowerCamelCase) for iter_hidden_states in hidden_states] , [True] * len(__lowerCamelCase) , ) self.assertEqual(len(__lowerCamelCase) , (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(__lowerCamelCase): # adds PAD dummy token lowercase__ = min_length + idx + 1 lowercase__ = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(__lowerCamelCase) , ) pass @slow def UpperCAmelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = XLMModel.from_pretrained(__lowerCamelCase) self.assertIsNotNone(__lowerCamelCase) @require_torch class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' @slow def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048') model.to(__lowerCamelCase) lowercase__ = torch.tensor([[14, 4_47]] , dtype=torch.long , device=__lowerCamelCase) # the president lowercase__ = [ 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, 14, 4_47, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowercase__ = model.generate(__lowerCamelCase , do_sample=__lowerCamelCase) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , __lowerCamelCase)
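A minimal inference sketch for the model family under test above, using a tiny randomly initialized `XLMModel` (no pretrained weights needed):

import torch
from transformers import XLMConfig, XLMModel

model = XLMModel(XLMConfig(vocab_size=99, emb_dim=32, n_layers=2, n_heads=4))
out = model(torch.randint(0, 99, (1, 7)))
print(out.last_hidden_state.shape)  # torch.Size([1, 7, 32])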
702
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        # The fixture must be named `setUp` and the results must be bound to
        # attributes for the assertions below to work; the obfuscated variable
        # names in the original dropped both.
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
642
0
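# A minimal usage sketch for the text-classification tool exercised in the
# test above; assumes a transformers build that ships the `load_tool` agents
# API (the call signature mirrors the test itself):
from transformers import load_tool

tool = load_tool("text-classification")
tool.setup()
result = tool("That's quite cool", ["positive", "negative"])
print(result)  # expected: "positive"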
import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class UpperCAmelCase__: '''simple docstring''' def __init__( self : str , lowerCAmelCase : List[str] , lowerCAmelCase : str=sys.maxsize) -> Any: """simple docstring""" lowercase__ = 'bilinear' lowercase__ = max_size lowercase__ = short_edge_length def __call__( self : Optional[Any] , lowerCAmelCase : Union[str, Any]) -> List[str]: """simple docstring""" lowercase__ = [] for img in imgs: lowercase__, lowercase__ = img.shape[:2] # later: provide list and randomly choose index for resize lowercase__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1) if size == 0: return img lowercase__ = size * 1.0 / min(_lowercase , _lowercase) if h < w: lowercase__, lowercase__ = size, scale * w else: lowercase__, lowercase__ = scale * h, size if max(_lowercase , _lowercase) > self.max_size: lowercase__ = self.max_size * 1.0 / max(_lowercase , _lowercase) lowercase__ = newh * scale lowercase__ = neww * scale lowercase__ = int(neww + 0.5) lowercase__ = int(newh + 0.5) if img.dtype == np.uinta: lowercase__ = Image.fromarray(_lowercase) lowercase__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR) lowercase__ = np.asarray(_lowercase) else: lowercase__ = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw lowercase__ = nn.functional.interpolate( _lowercase , (newh, neww) , mode=self.interp_method , align_corners=_lowercase).squeeze(0) img_augs.append(_lowercase) return img_augs class UpperCAmelCase__: '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase : int) -> Optional[Any]: """simple docstring""" lowercase__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST) lowercase__ = cfg.INPUT.FORMAT lowercase__ = cfg.SIZE_DIVISIBILITY lowercase__ = cfg.PAD_VALUE lowercase__ = cfg.INPUT.MAX_SIZE_TEST lowercase__ = cfg.MODEL.DEVICE lowercase__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) lowercase__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) lowercase__ = lambda lowerCAmelCase: (x - self.pixel_mean) / self.pixel_std def UpperCAmelCase ( self : Any , lowerCAmelCase : str) -> Any: """simple docstring""" lowercase__ = tuple(max(_lowercase) for s in zip(*[img.shape for img in images])) lowercase__ = [im.shape[-2:] for im in images] lowercase__ = [ nn.functional.pad( _lowercase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(_lowercase , _lowercase) ] return torch.stack(_lowercase), torch.tensor(_lowercase) def __call__( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=False) -> Optional[int]: """simple docstring""" with torch.no_grad(): if not isinstance(_lowercase , _lowercase): lowercase__ = [images] if single_image: assert len(_lowercase) == 1 for i in range(len(_lowercase)): if isinstance(images[i] , torch.Tensor): images.insert(_lowercase , images.pop(_lowercase).to(self.device).float()) elif not isinstance(images[i] , torch.Tensor): images.insert( _lowercase , torch.as_tensor(img_tensorize(images.pop(_lowercase) , input_format=self.input_format)) .to(self.device) .float() , ) # resize smallest edge lowercase__ = torch.tensor([im.shape[:2] for im in images]) lowercase__ = self.aug(_lowercase) # transpose images and convert to torch 
tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowercase__ = [self.normalizer(_lowercase) for x in images] # now pad them to do the following operations lowercase__, lowercase__ = self.pad(_lowercase) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad lowercase__ = torch.true_divide(_lowercase , _lowercase) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _lowerCAmelCase ( A__ , A__ ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _lowerCAmelCase ( A__ , A__ ): assert torch.isfinite(UpperCamelCase__ ).all(), "Box tensor contains infinite or NaN!" lowercase__, lowercase__ = box_size tensor[:, 0].clamp_(min=0 , max=UpperCamelCase__ ) tensor[:, 1].clamp_(min=0 , max=UpperCamelCase__ ) tensor[:, 2].clamp_(min=0 , max=UpperCamelCase__ ) tensor[:, 3].clamp_(min=0 , max=UpperCamelCase__ )
703
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : List[Any] = None A : Optional[int] = None @property def UpperCAmelCase ( self : str) -> Union[str, Any]: """simple docstring""" return self.feat_extract_tester.prepare_feat_extract_dict() def UpperCAmelCase ( self : int) -> Any: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) self.assertTrue(hasattr(lowerCAmelCase , 'feature_size')) self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate')) self.assertTrue(hasattr(lowerCAmelCase , 'padding_value')) def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name]))) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) @require_torch def UpperCAmelCase ( self : Dict) -> int: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) @require_tf def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase) lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf') lowercase__ = processed_features[input_name] if len(batch_features_input.shape) < 3: lowercase__ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)) def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]: """simple docstring""" def _inputs_have_equal_length(lowerCAmelCase : int): lowercase__ = len(input[0]) for input_slice in input[1:]: if len(lowerCAmelCase) != length: return False return True def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple): if len(lowerCAmelCase) != len(lowerCAmelCase): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase): if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3): 
return False return True lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = self.feat_extract_tester.seq_length_diff lowercase__ = self.feat_extract_tester.max_seq_length + pad_diff lowercase__ = self.feat_extract_tester.min_seq_length lowercase__ = self.feat_extract_tester.batch_size lowercase__ = self.feat_extract_tester.feature_size # test padding for List[int] + numpy lowercase__ = feat_extract.pad(lowerCAmelCase , padding=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest') lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[-1])) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np') lowercase__ = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='max_length')[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , return_tensors='np') lowercase__ = input_a[input_name] self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) self.assertTrue(len(input_a[0]) == pad_min_length) self.assertTrue(len(input_a[1]) == pad_min_length + pad_diff) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0]))) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length)) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size) # test padding for `pad_to_multiple_of` for List[int] + numpy lowercase__ = feat_extract.pad(lowerCAmelCase , pad_to_multiple_of=10) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , pad_to_multiple_of=10) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , pad_to_multiple_of=10 , max_length=lowerCAmelCase , return_tensors='np' , ) lowercase__ = input_a[input_name] self.assertTrue(all(len(lowerCAmelCase) % 10 == 0 for x in input_a)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) lowercase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(lowerCAmelCase) == expected_mult_pad_length for x in input_a)) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length)) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size) # Check padding value is correct lowercase__ = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3) self.assertTrue( abs( np.asarray(input_a[1])[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)) < 1E-3) self.assertTrue( abs( 
np.asarray(input_a[2])[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)) < 1E-3) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1E-3) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length)) < 1E-3) def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Dict=False) -> str: """simple docstring""" def _inputs_have_equal_length(lowerCAmelCase : int): lowercase__ = len(input[0]) for input_slice in input[1:]: if len(lowerCAmelCase) != length: return False return True def _inputs_are_equal(lowerCAmelCase : str , lowerCAmelCase : Optional[Any]): if len(lowerCAmelCase) != len(lowerCAmelCase): return False for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase): if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3): return False return True lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCAmelCase) lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) # truncate to smallest lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , truncation=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0])) lowercase__ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) # truncate to smallest with np lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np' , truncation=lowerCAmelCase , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , return_tensors='np') lowercase__ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(input_a.shape[1] == len(speech_inputs[0])) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) # truncate to middle lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase , return_tensors='np' , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , truncation=lowerCAmelCase) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[1]) , return_tensors='np') lowercase__ = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1])) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(_inputs_are_equal(lowerCAmelCase , lowerCAmelCase)) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) self.assertTrue(len(input_a[-1]) == len(speech_inputs[-1])) # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): 
feat_extract.pad(lowerCAmelCase , truncation=lowerCAmelCase)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='longest' , truncation=lowerCAmelCase)[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(lowerCAmelCase): feat_extract.pad(lowerCAmelCase , padding='max_length' , truncation=lowerCAmelCase)[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy lowercase__ = 12 lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , truncation=lowerCAmelCase , ) lowercase__ = input_a[input_name] lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=len(speech_inputs[0]) , pad_to_multiple_of=lowerCAmelCase , ) lowercase__ = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of lowercase__ = len(speech_inputs[0]) if expected_length % pad_to_multiple_of != 0: lowercase__ = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0]) == expected_length) self.assertTrue(_inputs_have_equal_length(lowerCAmelCase)) self.assertFalse(_inputs_have_equal_length(lowerCAmelCase)) def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" self._check_padding(numpify=lowerCAmelCase) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" self._check_padding(numpify=lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> int: """simple docstring""" self._check_truncation(numpify=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" self._check_truncation(numpify=lowerCAmelCase) @require_torch def UpperCAmelCase ( self : Dict) -> List[str]: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='pt')[input_name] self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_pt.numpy().astype(np.floataa).sum()) < 1E-2) @require_tf def UpperCAmelCase ( self : str) -> str: """simple docstring""" lowercase__ = self.feature_extraction_class(**self.feat_extract_dict) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np')[input_name] lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='tf')[input_name] self.assertTrue(abs(input_np.astype(np.floataa).sum() - input_tf.numpy().astype(np.floataa).sum()) < 1E-2) def UpperCAmelCase ( self : Optional[Any]) -> Tuple: """simple docstring""" lowercase__ = self.feat_extract_dict lowercase__ = True lowercase__ = self.feature_extraction_class(**lowerCAmelCase) lowercase__ = 
self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = [len(lowerCAmelCase) for x in speech_inputs] lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = feat_extract.pad(lowerCAmelCase , padding='longest' , return_tensors='np') self.assertIn('attention_mask' , lowerCAmelCase) self.assertListEqual(list(processed.attention_mask.shape) , list(processed[input_name].shape[:2])) self.assertListEqual(processed.attention_mask.sum(-1).tolist() , lowerCAmelCase) def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" lowercase__ = self.feat_extract_dict lowercase__ = True lowercase__ = self.feature_extraction_class(**lowerCAmelCase) lowercase__ = self.feat_extract_tester.prepare_inputs_for_common() lowercase__ = [len(lowerCAmelCase) for x in speech_inputs] lowercase__ = feat_extract.model_input_names[0] lowercase__ = BatchFeature({input_name: speech_inputs}) lowercase__ = min(lowerCAmelCase) lowercase__ = feat_extract.pad( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='np') self.assertIn('attention_mask' , lowerCAmelCase) self.assertListEqual( list(processed_pad.attention_mask.shape) , [processed_pad[input_name].shape[0], max_length]) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist() , [max_length for x in speech_inputs])
642
0
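# Worked example of the shortest-edge resize rule used in the preprocessing
# row above: scale the short side to `size`, then clamp the long side to
# `max_size`. The values here are illustrative, not from the source.
h, w, size, max_size = 480, 640, 224, 1333
scale = size * 1.0 / min(h, w)
newh, neww = (size, scale * w) if h < w else (scale * h, size)
if max(newh, neww) > max_size:
    rescale = max_size * 1.0 / max(newh, neww)
    newh, neww = newh * rescale, neww * rescale
print(int(neww + 0.5), int(newh + 0.5))  # -> 299 224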
from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCAmelCase__: '''simple docstring''' def __init__( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any]=13 , lowerCAmelCase : List[str]=30 , lowerCAmelCase : Union[str, Any]=2 , lowerCAmelCase : List[str]=3 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Union[str, Any]=32 , lowerCAmelCase : Optional[Any]=2 , lowerCAmelCase : Dict=4 , lowerCAmelCase : Union[str, Any]=37 , lowerCAmelCase : int="gelu" , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Optional[Any]=0.1 , lowerCAmelCase : Optional[int]=10 , lowerCAmelCase : List[str]=0.02 , lowerCAmelCase : Dict=3 , lowerCAmelCase : int=None , lowerCAmelCase : Optional[Any]=2 , ) -> Any: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = image_size lowercase__ = patch_size lowercase__ = num_channels lowercase__ = is_training lowercase__ = use_labels lowercase__ = hidden_size lowercase__ = num_hidden_layers lowercase__ = num_attention_heads lowercase__ = intermediate_size lowercase__ = hidden_act lowercase__ = hidden_dropout_prob lowercase__ = attention_probs_dropout_prob lowercase__ = type_sequence_label_size lowercase__ = initializer_range lowercase__ = scope lowercase__ = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) lowercase__ = (image_size // patch_size) ** 2 lowercase__ = num_patches + 2 def UpperCAmelCase ( self : List[Any]) -> List[str]: """simple docstring""" lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ = self.get_config() return config, pixel_values, labels def UpperCAmelCase ( self : str) -> str: """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str) -> Optional[Any]: """simple docstring""" lowercase__ = TFDeiTModel(config=lowercase_) lowercase__ = model(lowercase_) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Any) -> Any: """simple docstring""" lowercase__ = TFDeiTForMaskedImageModeling(config=lowercase_) lowercase__ = model(lowercase_) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images lowercase__ = 1 lowercase__ = TFDeiTForMaskedImageModeling(lowercase_) lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) lowercase__ = model(lowercase_) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Any) -> Union[str, Any]: """simple docstring""" lowercase__ = self.type_sequence_label_size lowercase__ = TFDeiTForImageClassification(lowercase_) lowercase__ = model(lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images lowercase__ = 1 lowercase__ = TFDeiTForImageClassification(lowercase_) lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) lowercase__ = model(lowercase_ , labels=lowercase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() lowercase__ = config_and_inputs lowercase__ = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase__( snake_case__ , snake_case__ , unittest.TestCase ): '''simple docstring''' A : Optional[int] = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) A : Union[str, Any] = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) A : Tuple = False A : str = False A : int = False A : str = False def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]: """simple docstring""" lowercase__ = TFDeiTModelTester(self) lowercase__ = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37) def UpperCAmelCase ( self : Dict) -> Optional[Any]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='DeiT does not use inputs_embeds') def UpperCAmelCase ( self : Optional[Any]) -> List[Any]: """simple docstring""" pass def UpperCAmelCase ( self : Optional[Any]) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(lowercase_) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer)) lowercase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , tf.keras.layers.Dense)) def UpperCAmelCase ( self : Tuple) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ = model_class(lowercase_) lowercase__ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ = [*signature.parameters.keys()] lowercase__ 
= ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_) def UpperCAmelCase ( self : Optional[int]) -> Optional[int]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_) def UpperCAmelCase ( self : List[Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_) def UpperCAmelCase ( self : Any) -> Any: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowercase_) def UpperCAmelCase ( self : int , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any]=False) -> Dict: """simple docstring""" lowercase__ = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def UpperCAmelCase ( self : Optional[Any]) -> Any: """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ = TFDeiTModel.from_pretrained(lowercase_) self.assertIsNotNone(lowercase_) def _lowerCAmelCase ( ): lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCAmelCase ( self : Union[str, Any]) -> Any: """simple docstring""" return ( DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224') if is_vision_available() else None ) @slow def UpperCAmelCase ( self : List[Any]) -> Tuple: """simple docstring""" lowercase__ = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224') lowercase__ = self.default_image_processor lowercase__ = prepare_img() lowercase__ = image_processor(images=lowercase_ , return_tensors='tf') # forward pass lowercase__ = model(**lowercase_) # verify the logits lowercase__ = tf.TensorShape((1, 10_00)) self.assertEqual(outputs.logits.shape , lowercase_) lowercase__ = tf.constant([-1.02_66, 0.19_12, -1.28_61]) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4))
704
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    """Mobius function: 0 if `number` has a squared prime factor,
    otherwise (-1) ** k for k distinct prime factors.
    Note: the square-free check and the parity test must operate on the
    factor list, not on `number` itself."""
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
642
0
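# Quick check of the Mobius function from the row above. This standalone
# re-implementation inlines trial-division factoring for illustration (the
# imports in the source assume the `maths` package layout of that repository):
def mobius_standalone(n: int) -> int:
    factors = []
    d, m = 2, n
    while d * d <= m:
        while m % d == 0:
            factors.append(d)
            m //= d
        d += 1
    if m > 1:
        factors.append(m)
    if len(set(factors)) != len(factors):  # a repeated factor -> not square-free
        return 0
    return -1 if len(factors) % 2 else 1

print([mobius_standalone(n) for n in range(1, 11)])  # [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]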
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType

logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
    "microsoft/deberta-v2-xlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
    ),
    "microsoft/deberta-v2-xxlarge-mnli": (
        "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
    ),
}


class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        # The obfuscated original gave every parameter the same name, which is
        # not valid Python; the names here are restored from the defaults.
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept a "|"-separated string
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
705
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        # Parameter names restored from the attribute assignments below; the
        # obfuscated original reused one name for every argument.
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
642
0
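# Minimal sketch of the backwards-compatibility branch in the DeBERTa-v2
# config above: a "|"-separated string for `pos_att_type` is split into a
# list (assumes an installed transformers build exporting DebertaV2Config):
from transformers import DebertaV2Config

config = DebertaV2Config(pos_att_type="p2c|c2p")
print(config.pos_att_type)  # ['p2c', 'c2p']
print(config.hidden_size)   # 1536 (default)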
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# The import structure must be accumulated in a single dict; the obfuscated
# original rebound an unrelated name for each branch.
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swin"] = [
        "SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwinForImageClassification",
        "SwinForMaskedImageModeling",
        "SwinModel",
        "SwinPreTrainedModel",
        "SwinBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_swin"] = [
        "TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSwinForImageClassification",
        "TFSwinForMaskedImageModeling",
        "TFSwinModel",
        "TFSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
706
import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a__ : Optional[int] = logging.get_logger(__name__) a__ : Dict = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } a__ : str = { "vocab_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json" }, "merges_file": { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt" }, "tokenizer_config_file": { "facebook/blenderbot_small-90M": ( "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json" ) }, } a__ : Any = {"facebook/blenderbot_small-90M": 5_12} def _lowerCAmelCase ( A__ ): lowercase__ = set() lowercase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ = char lowercase__ = set(A__ ) return pairs class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : List[str] = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : Tuple = ["input_ids", "attention_mask"] def __init__( self : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : int="__start__" , lowerCAmelCase : Dict="__end__" , lowerCAmelCase : Any="__unk__" , lowerCAmelCase : str="__null__" , **lowerCAmelCase : Optional[Any] , ) -> List[str]: """simple docstring""" super().__init__(unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , **lowerCAmelCase) with open(lowerCAmelCase , encoding='utf-8') as vocab_handle: lowercase__ = json.load(lowerCAmelCase) lowercase__ = {v: k for k, v in self.encoder.items()} with open(lowerCAmelCase , encoding='utf-8') as merges_handle: lowercase__ = merges_handle.read().split('\n')[1:-1] lowercase__ = [tuple(merge.split()) for merge in merges] lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase)))) lowercase__ = {} @property def UpperCAmelCase ( self : int) -> int: """simple docstring""" return len(self.encoder) def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder) def UpperCAmelCase ( self : str , lowerCAmelCase : str) -> str: """simple docstring""" if token in self.cache: return self.cache[token] lowercase__ = re.sub('([.,!?()])' , R' \1' , lowerCAmelCase) lowercase__ = re.sub('(\')' , R' \1 ' , lowerCAmelCase) lowercase__ = re.sub(R'\s{2,}' , ' ' , lowerCAmelCase) if "\n" in token: lowercase__ = token.replace('\n' , ' __newln__') lowercase__ = token.split(' ') lowercase__ = [] for token in tokens: if not len(lowerCAmelCase): continue lowercase__ = token.lower() lowercase__ = tuple(lowerCAmelCase) lowercase__ = tuple(list(word[:-1]) + [word[-1] + '</w>']) lowercase__ = get_pairs(lowerCAmelCase) if not pairs: words.append(lowerCAmelCase) continue while True: lowercase__ = min(lowerCAmelCase , key=lambda lowerCAmelCase: self.bpe_ranks.get(lowerCAmelCase , float('inf'))) if bigram not in self.bpe_ranks: break lowercase__, lowercase__ = bigram lowercase__ = [] lowercase__ = 0 while i < len(lowerCAmelCase): try: lowercase__ = word.index(lowerCAmelCase , lowerCAmelCase) new_word.extend(word[i:j]) lowercase__ = j except ValueError: new_word.extend(word[i:]) break if word[i] == first and i < len(lowerCAmelCase) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: 
new_word.append(word[i]) i += 1 lowercase__ = tuple(lowerCAmelCase) lowercase__ = new_word if len(lowerCAmelCase) == 1: break else: lowercase__ = get_pairs(lowerCAmelCase) lowercase__ = '@@ '.join(lowerCAmelCase) lowercase__ = word[:-4] lowercase__ = word words.append(lowerCAmelCase) return " ".join(lowerCAmelCase) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str) -> List[str]: """simple docstring""" lowercase__ = [] lowercase__ = re.findall(R'\S+\n?' , lowerCAmelCase) for token in words: split_tokens.extend(list(self.bpe(lowerCAmelCase).split(' '))) return split_tokens def UpperCAmelCase ( self : int , lowerCAmelCase : str) -> int: """simple docstring""" lowercase__ = token.lower() return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token)) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : int) -> str: """simple docstring""" return self.decoder.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : List[str]) -> str: """simple docstring""" lowercase__ = ' '.join(lowerCAmelCase).replace('@@ ' , '').strip() return out_string def UpperCAmelCase ( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" if not os.path.isdir(lowerCAmelCase): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''') return lowercase__ = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) lowercase__ = os.path.join( lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']) with open(lowerCAmelCase , 'w' , encoding='utf-8') as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase) + '\n') lowercase__ = 0 with open(lowerCAmelCase , 'w' , encoding='utf-8') as writer: writer.write('#version: 0.2\n') for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCAmelCase: kv[1]): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!') lowercase__ = token_index writer.write(' '.join(lowerCAmelCase) + '\n') index += 1 return vocab_file, merge_file
642
0
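# Quick check of the `get_pairs` BPE helper defined in the tokenizer row
# above: it collects the set of adjacent symbol pairs in a word.
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

print(sorted(get_pairs(("h", "e", "l", "l", "o</w>"))))
# [('e', 'l'), ('h', 'e'), ('l', 'l'), ('l', 'o</w>')]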
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a__ : List[Any] = logging.get_logger(__name__) class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Union[str, Any] = ["pixel_values"] def __init__( self : str , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 2_55 , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase : Optional[Any] , ) -> None: """simple docstring""" super().__init__(**__UpperCamelCase) lowercase__ = size if size is not None else {'shortest_edge': 2_24} lowercase__ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase) lowercase__ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24} lowercase__ = get_size_dict(__UpperCamelCase , param_name='crop_size') lowercase__ = do_resize lowercase__ = size lowercase__ = resample lowercase__ = do_center_crop lowercase__ = crop_size lowercase__ = do_rescale lowercase__ = rescale_factor lowercase__ = do_normalize lowercase__ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowercase__ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def UpperCAmelCase ( self : Any , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : str , ) -> np.ndarray: """simple docstring""" lowercase__ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: lowercase__ = int((2_56 / 2_24) * size['shortest_edge']) lowercase__ = get_resize_output_image_size(__UpperCamelCase , size=__UpperCamelCase , default_to_square=__UpperCamelCase) lowercase__ = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''') return resize( __UpperCamelCase , size=(size_dict['height'], size_dict['width']) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[Any] , ) -> np.ndarray: """simple docstring""" lowercase__ = get_size_dict(__UpperCamelCase) if "height" not in size or "width" not in size: raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''') return center_crop(__UpperCamelCase , size=(size['height'], size['width']) , data_format=__UpperCamelCase , **__UpperCamelCase) def UpperCAmelCase ( self : Any , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : int , ) -> np.ndarray: """simple docstring""" return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase) def UpperCAmelCase ( self : Any , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Optional[Any] , ) -> np.ndarray: """simple docstring""" return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : ImageInput , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Dict[str, int]] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Dict[str, int]] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[float] = None , lowerCAmelCase : Optional[bool] = None , lowerCAmelCase : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase : Optional[TensorType] = None , lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase : Dict , ) -> BatchFeature: """simple docstring""" lowercase__ = do_resize if do_resize is not None else self.do_resize lowercase__ = resample if resample is not None else self.resample lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ = do_rescale if do_rescale is not None else self.do_rescale lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ = do_normalize if do_normalize is not None else self.do_normalize lowercase__ = image_mean if image_mean is not None else self.image_mean lowercase__ = image_std if image_std is not None else self.image_std lowercase__ = size if size is not None else self.size lowercase__ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase) lowercase__ = crop_size if crop_size is not None else self.crop_size lowercase__ = get_size_dict(__UpperCamelCase , param_name='crop_size') lowercase__ = make_list_of_images(__UpperCamelCase) if not valid_images(__UpperCamelCase): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. 
lowercase__ = [to_numpy_array(__UpperCamelCase) for image in images] if do_resize: lowercase__ = [self.resize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) for image in images] if do_center_crop: lowercase__ = [self.center_crop(__UpperCamelCase , __UpperCamelCase) for image in images] if do_rescale: lowercase__ = [self.rescale(__UpperCamelCase , __UpperCamelCase) for image in images] if do_normalize: lowercase__ = [self.normalize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) for image in images] lowercase__ = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase) for image in images] lowercase__ = {'pixel_values': images} return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase)
707
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# As in the Swin module above, the branches must extend one shared dict.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
642
0
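# The resize step in the image-processor row above scales `shortest_edge`
# by 256/224 before the center crop; a quick check of that arithmetic for
# the default size of 224:
size = {"shortest_edge": 224}
crop_size = {"height": 224, "width": 224}
resize_edge = int((256 / 224) * size["shortest_edge"])
print(resize_edge)  # -> 256: resize the short side to 256, then crop 224x224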
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


# The pytest hooks below must carry their standard names to be picked up;
# the obfuscation had collapsed them all into one identifier.
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
import heapq import sys import numpy as np a__ : Dict = tuple[int, int] class UpperCAmelCase__: '''simple docstring''' def __init__( self : List[str]) -> Any: """simple docstring""" lowercase__ = [] lowercase__ = set() def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]: """simple docstring""" if not self.empty(): return self.elements[0][0] else: return float('inf') def UpperCAmelCase ( self : int) -> str: """simple docstring""" return len(self.elements) == 0 def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str]) -> List[str]: """simple docstring""" if item not in self.set: heapq.heappush(self.elements , (priority, item)) self.set.add(lowerCAmelCase) else: # update # print("update", item) lowercase__ = [] ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) while x != item: temp.append((pri, x)) ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) temp.append((priority, item)) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : int) -> Tuple: """simple docstring""" if item in self.set: self.set.remove(lowerCAmelCase) lowercase__ = [] ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) while x != item: temp.append((pro, x)) ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy)) def UpperCAmelCase ( self : Dict) -> List[Any]: """simple docstring""" return self.elements[0][1] def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" ((lowercase__), (lowercase__)) = heapq.heappop(self.elements) self.set.remove(lowerCAmelCase) return (priority, item) def _lowerCAmelCase ( A__ , A__ ): # euclidean distance lowercase__ = np.array(A__ ) lowercase__ = np.array(A__ ) return np.linalg.norm(a - b ) def _lowerCAmelCase ( A__ , A__ ): # integer division by time variable return consistent_heuristic(A__ , A__ ) // t def _lowerCAmelCase ( A__ , A__ ): # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def _lowerCAmelCase ( A__ , A__ , A__ , A__ ): lowercase__ = g_function[start] + Wa * heuristics[i](A__ , A__ ) return ans def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = np.chararray((n, n) ) for i in range(A__ ): for j in range(A__ ): lowercase__ = '*' for i in range(A__ ): for j in range(A__ ): if (j, (n - 1) - i) in blocks: lowercase__ = '#' lowercase__ = '-' lowercase__ = back_pointer[goal] while x != start: ((lowercase__), (lowercase__)) = x # print(x) lowercase__ = '-' lowercase__ = back_pointer[x] lowercase__ = '-' for i in range(A__ ): for j in range(A__ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=' ' ) print('<-- End position' , end=' ' ) else: print(grid[i][j] , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) print('PATH TAKEN BY THE ALGORITHM IS:-' ) lowercase__ = back_pointer[goal] while x != start: print(A__ , end=' ' ) lowercase__ = back_pointer[x] print(A__ ) sys.exit() def _lowerCAmelCase ( A__ ): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def _lowerCAmelCase ( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ): for itera in range(A__ ): open_list[itera].remove_element(A__ ) # print("s", s) # print("j", j) ((lowercase__), (lowercase__)) = s lowercase__ = (x - 1, y) lowercase__ = (x + 1, y) lowercase__ = (x, y + 1) lowercase__ = (x, y - 1) for neighbours in [left, right, up, down]: if 
neighbours not in blocks: if valid(A__ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(A__ ) lowercase__ = -1 lowercase__ = float('inf' ) if valid(A__ ) and g_function[neighbours] > g_function[s] + 1: lowercase__ = g_function[s] + 1 lowercase__ = s if neighbours not in close_list_anchor: open_list[0].put(A__ , key(A__ , 0 , A__ , A__ ) ) if neighbours not in close_list_inad: for var in range(1 , A__ ): if key(A__ , A__ , A__ , A__ ) <= Wa * key( A__ , 0 , A__ , A__ ): open_list[j].put( A__ , key(A__ , A__ , A__ , A__ ) ) def _lowerCAmelCase ( ): lowercase__ = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list a__ : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} a__ : Any = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] a__ : Any = make_common_ground() a__ : Union[str, Any] = blocks_blk # hyper parameters a__ : List[Any] = 1 a__ : List[str] = 1 a__ : Optional[int] = 20 a__ : Optional[Any] = 3 # one consistent and two other inconsistent # start and end destination a__ : Tuple = (0, 0) a__ : str = (n - 1, n - 1) a__ : Optional[Any] = 1 def _lowerCAmelCase ( A__ , A__ , A__ ): lowercase__ = {start: 0, goal: float('inf' )} lowercase__ = {start: -1, goal: -1} lowercase__ = [] lowercase__ = set() for i in range(A__ ): open_list.append(PriorityQueue() ) open_list[i].put(A__ , key(A__ , A__ , A__ , A__ ) ) lowercase__ = [] lowercase__ = [] while open_list[0].minkey() < float('inf' ): for i in range(1 , A__ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('inf' ): do_something(A__ , A__ , A__ ) else: lowercase__, lowercase__ = open_list[i].top_show() visited.add(A__ ) expand_state( A__ , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) close_list_inad.append(A__ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('inf' ): do_something(A__ , A__ , A__ ) else: lowercase__ = open_list[0].top_show() visited.add(A__ ) expand_state( A__ , 0 , A__ , A__ , A__ , A__ , A__ , A__ , ) close_list_anchor.append(A__ ) print('No path found to goal' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(A__ ): if (j, i) in blocks: print('#' , end=' ' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('*' , end=' ' ) else: print('-' , end=' ' ) else: print('*' , end=' ' ) if (j, i) == (n - 1, n - 1): print('<-- End position' , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
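# Illustrative, self-contained sanity check of the two distance measures the search above
# relies on (restated locally; `euclidean` and `manhattan` are local names, not the
# script's own definitions).
import numpy as np


def euclidean(p, goal):
    return float(np.linalg.norm(np.array(p) - np.array(goal)))


def manhattan(p, goal):
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


print(manhattan((0, 0), (19, 19)))            # 38
print(round(euclidean((0, 0), (19, 19)), 2))  # 26.87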
a__ : Optional[Any] = "Tobias Carryer" from time import time class UpperCAmelCase__: '''simple docstring''' def __init__( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : Any=int(time())) -> Union[str, Any]: # noqa: B008 """simple docstring""" lowercase__ = multiplier lowercase__ = increment lowercase__ = modulo lowercase__ = seed def UpperCAmelCase ( self : Tuple) -> Tuple: """simple docstring""" lowercase__ = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. a__ : List[str] = LinearCongruentialGenerator(1_66_45_25, 10_13_90_42_23, 2 << 31) while True: print(lcg.next_number())
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string with the LZW algorithm and return the result."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # Once the lexicon size reaches a power of two, codes need one more bit:
        # rebuild every existing key with a leading "0".
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string as bytes in the destination file."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the size prefix that a compressed file carries and return the payload."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it and write the result to the destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
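# Quick check of decompress_data on single-bit inputs: with the seed lexicon
# {"0": "0", "1": "1"}, each one-bit code decodes to itself before the lexicon is rebuilt.
print(decompress_data("1"))  # "1"
print(decompress_data("0"))  # "0"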
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging a__ : int = logging.get_logger(__name__) a__ : Tuple = {"vocab_file": "vocab.txt"} a__ : int = { "vocab_file": { "facebook/esm2_t6_8M_UR50D": "https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt", "facebook/esm2_t12_35M_UR50D": "https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt", }, } a__ : Dict = { "facebook/esm2_t6_8M_UR50D": 10_24, "facebook/esm2_t12_35M_UR50D": 10_24, } def _lowerCAmelCase ( A__ ): with open(A__ , 'r' ) as f: lowercase__ = f.read().splitlines() return [l.strip() for l in lines] class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Union[str, Any] = VOCAB_FILES_NAMES A : str = PRETRAINED_VOCAB_FILES_MAP A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : List[Any] = ["input_ids", "attention_mask"] def __init__( self : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[int]="<unk>" , lowerCAmelCase : Dict="<cls>" , lowerCAmelCase : List[str]="<pad>" , lowerCAmelCase : Union[str, Any]="<mask>" , lowerCAmelCase : Optional[Any]="<eos>" , **lowerCAmelCase : Any , ) -> Optional[Any]: """simple docstring""" super().__init__(**lowerCAmelCase) lowercase__ = load_vocab_file(lowerCAmelCase) lowercase__ = dict(enumerate(self.all_tokens)) lowercase__ = {tok: ind for ind, tok in enumerate(self.all_tokens)} lowercase__ = unk_token lowercase__ = cls_token lowercase__ = pad_token lowercase__ = mask_token lowercase__ = eos_token lowercase__ = self.all_tokens self._create_trie(self.unique_no_split_tokens) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : int) -> str: """simple docstring""" return self._id_to_token.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Dict , lowerCAmelCase : str) -> int: """simple docstring""" return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token)) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str , **lowerCAmelCase : Union[str, Any]) -> Dict: """simple docstring""" return text.split() def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any=False) -> Union[str, Any]: """simple docstring""" return len(self._id_to_token) def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" return {token: i for i, token in enumerate(self.all_tokens)} def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : str) -> int: """simple docstring""" return self._token_to_id.get(lowerCAmelCase , self._token_to_id.get(self.unk_token)) def UpperCAmelCase ( self : Dict , lowerCAmelCase : int) -> str: """simple docstring""" return self._id_to_token.get(lowerCAmelCase , self.unk_token) def UpperCAmelCase ( self : Any , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" lowercase__ = [self.cls_token_id] lowercase__ = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!') return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List , lowerCAmelCase : Optional[List] = None , lowerCAmelCase : bool = False) -> List[int]: """simple docstring""" if 
already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.') return [1 if token in self.all_special_ids else 0 for token in token_ids_a] lowercase__ = [1] + ([0] * len(lowerCAmelCase)) + [1] if token_ids_a is not None: mask += [0] * len(lowerCAmelCase) + [1] return mask def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int]) -> Dict: """simple docstring""" lowercase__ = os.path.join(lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt') with open(lowerCAmelCase , 'w') as f: f.write('\n'.join(self.all_tokens)) return (vocab_file,) @property def UpperCAmelCase ( self : Optional[int]) -> int: """simple docstring""" return self.get_vocab_size(with_added_tokens=lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Union[List[str], List[AddedToken]] , lowerCAmelCase : bool = False) -> int: """simple docstring""" return super()._add_tokens(lowerCAmelCase , special_tokens=lowerCAmelCase)
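# Illustrative usage of the released counterpart of this tokenizer (assumes the
# `transformers` package plus network access to fetch the vocab named in the map above):
from transformers import AutoTokenizer

esm_tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
ids = esm_tokenizer("MKTAYIAK")["input_ids"]  # protein sequences tokenize per residue
print(esm_tokenizer.convert_ids_to_tokens(ids))  # ['<cls>', 'M', 'K', ..., '<eos>']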
'''simple docstring''' from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) a__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name a__ : Optional[Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")\n >>> pipe_prior.to(\"cuda\")\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")\n >>> pipe.to(\"cuda\")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save(\"cat.png\")\n ```\n" def _lowerCAmelCase ( A__ , A__ , A__=8 ): lowercase__ = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 lowercase__ = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCAmelCase__( lowercase_ ): '''simple docstring''' def __init__( self : Any , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : DDPMScheduler , lowerCAmelCase : VQModel , ) -> Optional[Any]: """simple docstring""" super().__init__() self.register_modules( unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , ) lowercase__ = 2 ** (len(self.movq.config.block_out_channels) - 1) def UpperCAmelCase ( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Any) -> Optional[int]: """simple docstring""" if latents is None: lowercase__ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''') lowercase__ = latents.to(UpperCamelCase__) lowercase__ = latents * scheduler.init_noise_sigma return latents def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Optional[int]=0) -> Optional[int]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`') lowercase__ = torch.device(f'''cuda:{gpu_id}''') lowercase__ = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase__ , UpperCamelCase__) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str=0) -> List[Any]: """simple docstring""" if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0'): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.') lowercase__ = torch.device(f'''cuda:{gpu_id}''') if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=UpperCamelCase__) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but 
they probably exist) lowercase__ = None for cpu_offloaded_model in [self.unet, self.movq]: lowercase__, lowercase__ = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__) # We'll offload the last model manually. lowercase__ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" if not hasattr(self.unet , '_hf_hook'): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase__ , '_hf_hook') and hasattr(module._hf_hook , 'execution_device') and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase__) def __call__( self : Tuple , lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , lowerCAmelCase : int = 5_12 , lowerCAmelCase : int = 5_12 , lowerCAmelCase : int = 1_00 , lowerCAmelCase : float = 4.0 , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , ) -> str: """simple docstring""" lowercase__ = self._execution_device lowercase__ = guidance_scale > 1.0 if isinstance(UpperCamelCase__ , UpperCamelCase__): lowercase__ = torch.cat(UpperCamelCase__ , dim=0) lowercase__ = image_embeds.shape[0] * num_images_per_prompt if isinstance(UpperCamelCase__ , UpperCamelCase__): lowercase__ = torch.cat(UpperCamelCase__ , dim=0) if do_classifier_free_guidance: lowercase__ = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0) lowercase__ = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0) lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=UpperCamelCase__) self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__) lowercase__ = self.scheduler.timesteps lowercase__ = self.unet.config.in_channels lowercase__, lowercase__ = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor) # create initial latent lowercase__ = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCamelCase__)): # expand the latents if we are doing classifier free guidance lowercase__ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents lowercase__ = {'image_embeds': image_embeds} lowercase__ = self.unet( sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0] if do_classifier_free_guidance: lowercase__, lowercase__ = noise_pred.split(latents.shape[1] , dim=1) lowercase__, lowercase__ = noise_pred.chunk(2) lowercase__, lowercase__ = variance_pred.chunk(2) lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1) if not ( hasattr(self.scheduler.config , 'variance_type') and self.scheduler.config.variance_type in ["learned", "learned_range"] ): lowercase__, lowercase__ = noise_pred.split(latents.shape[1] , dim=1) # compute the previous noisy 
sample x_t -> x_t-1 lowercase__ = self.scheduler.step( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0] # post-processing lowercase__ = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__)['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''') if output_type in ["np", "pil"]: lowercase__ = image * 0.5 + 0.5 lowercase__ = image.clamp(0 , 1) lowercase__ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if output_type == "pil": lowercase__ = self.numpy_to_pil(UpperCamelCase__) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase__)
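# The latent-shape rounding the pipeline's helper performs, restated locally as a sketch
# (the def at the top of this file is name-mangled): divide by scale_factor**2, round up,
# then scale back by scale_factor.
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2 + (height % scale_factor**2 != 0)
    new_width = width // scale_factor**2 + (width % scale_factor**2 != 0)
    return new_height * scale_factor, new_width * scale_factor


print(downscale_height_and_width(512, 512))  # (64, 64)
print(downscale_height_and_width(768, 768))  # (96, 96)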
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo a__ : int = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n" a__ : Optional[Any] = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n" a__ : Tuple = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 
'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 
'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase__( datasets.Metric ): '''simple docstring''' def UpperCAmelCase ( self : List[Any]) -> MetricInfo: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token') , id='sequence'), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token') , id='sequence') , id='references'), }) , ) def UpperCAmelCase ( self : int , lowerCAmelCase : List[List[List[str]]] , lowerCAmelCase : List[List[str]] , lowerCAmelCase : int = 1 , lowerCAmelCase : int = 4 , ) -> Dict[str, float]: """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase , hypotheses=lowerCAmelCase , min_len=lowerCAmelCase , max_len=lowerCAmelCase) }
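# The metric above delegates to nltk's corpus_gleu; a minimal self-contained call
# (assumes `nltk` is installed):
from nltk.translate.gleu_score import corpus_gleu

references = [[["the", "cat", "sat"]]]
hypotheses = [["the", "cat", "sat"]]
print(corpus_gleu(references, hypotheses))  # 1.0 for an exact match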
from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class UpperCAmelCase__( __UpperCAmelCase ): '''simple docstring''' A : Dict = 42 class UpperCAmelCase__( __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' @register_to_config def __init__( self : Optional[int] , lowerCAmelCase : int = 6_55_36 , lowerCAmelCase : Optional[int] = None , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 2 , lowerCAmelCase : int = 0 , lowerCAmelCase : str = "fourier" , lowerCAmelCase : bool = True , lowerCAmelCase : bool = False , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , lowerCAmelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , lowerCAmelCase : Tuple[str] = "UNetMidBlock1D" , lowerCAmelCase : str = None , lowerCAmelCase : Tuple[int] = (32, 32, 64) , lowerCAmelCase : str = None , lowerCAmelCase : int = 8 , lowerCAmelCase : int = 1 , lowerCAmelCase : bool = False , ) -> Union[str, Any]: """simple docstring""" super().__init__() lowercase__ = sample_size # time if time_embedding_type == "fourier": lowercase__ = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=lowerCAmelCase_ , log=lowerCAmelCase_ , flip_sin_to_cos=lowerCAmelCase_) lowercase__ = 2 * block_out_channels[0] elif time_embedding_type == "positional": lowercase__ = Timesteps( block_out_channels[0] , flip_sin_to_cos=lowerCAmelCase_ , downscale_freq_shift=lowerCAmelCase_) lowercase__ = block_out_channels[0] if use_timestep_embedding: lowercase__ = block_out_channels[0] * 4 lowercase__ = TimestepEmbedding( in_channels=lowerCAmelCase_ , time_embed_dim=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , out_dim=block_out_channels[0] , ) lowercase__ = nn.ModuleList([]) lowercase__ = None lowercase__ = nn.ModuleList([]) lowercase__ = None # down lowercase__ = in_channels for i, down_block_type in enumerate(lowerCAmelCase_): lowercase__ = output_channel lowercase__ = block_out_channels[i] if i == 0: input_channel += extra_in_channels lowercase__ = i == len(lowerCAmelCase_) - 1 lowercase__ = get_down_block( lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(lowerCAmelCase_) # mid lowercase__ = get_mid_block( lowerCAmelCase_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCAmelCase_ , add_downsample=lowerCAmelCase_ , ) # up lowercase__ = list(reversed(lowerCAmelCase_)) lowercase__ = reversed_block_out_channels[0] if out_block_type is None: lowercase__ = out_channels else: lowercase__ = block_out_channels[0] for i, up_block_type in enumerate(lowerCAmelCase_): lowercase__ = output_channel lowercase__ = ( reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase_) - 1 else final_upsample_channels ) lowercase__ = i == len(lowerCAmelCase_) - 1 lowercase__ = get_up_block( lowerCAmelCase_ , num_layers=lowerCAmelCase_ , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , 
temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(lowerCAmelCase_) lowercase__ = output_channel # out lowercase__ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32) lowercase__ = get_out_block( out_block_type=lowerCAmelCase_ , num_groups_out=lowerCAmelCase_ , embed_dim=block_out_channels[0] , out_channels=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , fc_dim=block_out_channels[-1] // 4 , ) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : torch.FloatTensor , lowerCAmelCase : Union[torch.Tensor, float, int] , lowerCAmelCase : bool = True , ) -> Optional[Any]: """simple docstring""" lowercase__ = timestep if not torch.is_tensor(lowerCAmelCase_): lowercase__ = torch.tensor([timesteps] , dtype=torch.long , device=sample.device) elif torch.is_tensor(lowerCAmelCase_) and len(timesteps.shape) == 0: lowercase__ = timesteps[None].to(sample.device) lowercase__ = self.time_proj(lowerCAmelCase_) if self.config.use_timestep_embedding: lowercase__ = self.time_mlp(lowerCAmelCase_) else: lowercase__ = timestep_embed[..., None] lowercase__ = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype) lowercase__ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:])) # 2. down lowercase__ = () for downsample_block in self.down_blocks: lowercase__, lowercase__ = downsample_block(hidden_states=lowerCAmelCase_ , temb=lowerCAmelCase_) down_block_res_samples += res_samples # 3. mid if self.mid_block: lowercase__ = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_) # 4. up for i, upsample_block in enumerate(self.up_blocks): lowercase__ = down_block_res_samples[-1:] lowercase__ = down_block_res_samples[:-1] lowercase__ = upsample_block(lowerCAmelCase_ , res_hidden_states_tuple=lowerCAmelCase_ , temb=lowerCAmelCase_) # 5. post-process if self.out_block: lowercase__ = self.out_block(lowerCAmelCase_ , lowerCAmelCase_) if not return_dict: return (sample,) return UNetaDOutput(sample=lowerCAmelCase_)
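# A self-contained sketch of the "fourier" time-embedding branch used above: the layer
# projects timesteps onto fixed random frequencies and concatenates sin/cos, so
# embedding_size=8 yields 16 features. `fourier_time_features` is a local restatement,
# not the real GaussianFourierProjection.
import math

import torch


def fourier_time_features(timesteps, weight):
    x = timesteps[:, None].float() * weight[None, :] * 2 * math.pi
    return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)


w = torch.randn(8)  # frozen at init in the real layer (embedding_size=8)
print(fourier_time_features(torch.tensor([10]), w).shape)  # torch.Size([1, 16])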
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class UpperCAmelCase__: '''simple docstring''' def __init__( self : Tuple , lowerCAmelCase : str , lowerCAmelCase : Dict=13 , lowerCAmelCase : Dict=7 , lowerCAmelCase : int=True , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : str=True , lowerCAmelCase : int=True , lowerCAmelCase : List[Any]=99 , lowerCAmelCase : List[Any]=[1, 1, 2] , lowerCAmelCase : Optional[Any]=1 , lowerCAmelCase : int=32 , lowerCAmelCase : Union[str, Any]=4 , lowerCAmelCase : Tuple=8 , lowerCAmelCase : int=37 , lowerCAmelCase : Any="gelu_new" , lowerCAmelCase : str=0.1 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Dict=0.0 , lowerCAmelCase : str=5_12 , lowerCAmelCase : str=3 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Union[str, Any]=3 , lowerCAmelCase : Any=4 , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[int]=False , ) -> List[Any]: """simple docstring""" lowercase__ = parent lowercase__ = batch_size lowercase__ = seq_length lowercase__ = is_training lowercase__ = use_input_mask lowercase__ = use_token_type_ids lowercase__ = use_labels lowercase__ = vocab_size lowercase__ = block_sizes lowercase__ = num_decoder_layers lowercase__ = d_model lowercase__ = n_head lowercase__ = d_head lowercase__ = d_inner lowercase__ = hidden_act lowercase__ = hidden_dropout lowercase__ = attention_dropout lowercase__ = activation_dropout lowercase__ = max_position_embeddings lowercase__ = type_vocab_size lowercase__ = 2 lowercase__ = num_labels lowercase__ = num_choices lowercase__ = scope lowercase__ = initializer_std # Used in the tests to check the size of the first attention layer lowercase__ = n_head # Used in the tests to check the size of the first hidden state lowercase__ = self.d_model # Used in the tests to check the number of output hidden states/attentions lowercase__ = sum(self.block_sizes) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: lowercase__ = self.num_hidden_layers + 2 def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) lowercase__ = None if self.use_input_mask: lowercase__ = random_attention_mask([self.batch_size, self.seq_length]) lowercase__ = None if self.use_token_type_ids: lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) lowercase__ = None lowercase__ = None lowercase__ = None if self.use_labels: lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size) lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) lowercase__ = ids_tensor([self.batch_size] , self.num_choices) lowercase__ = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def UpperCAmelCase ( self : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , ) -> int: """simple docstring""" lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) lowercase__ = [input_ids, input_mask] lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) lowercase__ = False lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) lowercase__ = False lowercase__ = TFFunnelModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) lowercase__ = [input_ids, input_mask] lowercase__ = model(lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) lowercase__ = False lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model)) lowercase__ = False lowercase__ = TFFunnelBaseModel(config=lowerCAmelCase) lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model)) 
def UpperCAmelCase ( self : Tuple , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Dict , lowerCAmelCase : List[str] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , ) -> str: """simple docstring""" lowercase__ = TFFunnelForPreTraining(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" lowercase__ = TFFunnelForMaskedLM(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , ) -> Optional[int]: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFFunnelForSequenceClassification(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def UpperCAmelCase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , ) -> Optional[Any]: """simple docstring""" lowercase__ = self.num_choices lowercase__ = TFFunnelForMultipleChoice(config=lowerCAmelCase) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = tf.tile(tf.expand_dims(lowerCAmelCase , 1) , (1, self.num_choices, 1)) lowercase__ = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : str , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any , ) -> Union[str, Any]: """simple docstring""" lowercase__ = self.num_labels lowercase__ = TFFunnelForTokenClassification(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def UpperCAmelCase ( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Tuple , lowerCAmelCase : Tuple , ) -> Optional[int]: """simple docstring""" lowercase__ = 
TFFunnelForQuestionAnswering(config=lowerCAmelCase) lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ = model(lowerCAmelCase) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = self.prepare_config_and_inputs() ( ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ( lowercase__ ), ) = config_and_inputs lowercase__ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class UpperCAmelCase__( lowerCamelCase , lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : int = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) A : Dict = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) A : Optional[int] = False A : Optional[int] = False def UpperCAmelCase ( self : Tuple) -> str: """simple docstring""" lowercase__ = TFFunnelModelTester(self) lowercase__ = ConfigTester(self , config_class=lowerCAmelCase) def UpperCAmelCase ( self : int) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase) def UpperCAmelCase ( self : int) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase) def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase) @require_tf class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : Tuple = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) A : List[str] = False A : int = False def UpperCAmelCase ( self : Any) -> List[Any]: """simple docstring""" lowercase__ = TFFunnelModelTester(self , base=lowerCAmelCase) lowercase__ = ConfigTester(self , config_class=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*lowerCAmelCase) def 
UpperCAmelCase ( self : int) -> str: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[Any]: """simple docstring""" lowercase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase)
import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def _lowerCAmelCase ( A__ , A__=0.9_99 , A__="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(A__ ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A__ ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) lowercase__ = [] for i in range(a_ ): lowercase__ = i / num_diffusion_timesteps lowercase__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(a_ ) / alpha_bar_fn(a_ ) , a_ ) ) return torch.tensor(a_ , dtype=torch.floataa ) class UpperCAmelCase__( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' A : int = [e.name for e in KarrasDiffusionSchedulers] A : Dict = 2 @register_to_config def __init__( self : Optional[int] , lowerCAmelCase : Dict = 10_00 , lowerCAmelCase : Tuple = 0.0_00_85 , lowerCAmelCase : List[str] = 0.0_12 , lowerCAmelCase : Optional[int] = "linear" , lowerCAmelCase : List[str] = None , lowerCAmelCase : Any = "epsilon" , lowerCAmelCase : List[Any] = "linspace" , lowerCAmelCase : List[str] = 0 , ) -> Optional[int]: """simple docstring""" if trained_betas is not None: lowercase__ = torch.tensor(_UpperCAmelCase , dtype=torch.floataa) elif beta_schedule == "linear": lowercase__ = torch.linspace(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , dtype=torch.floataa) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. lowercase__ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _UpperCAmelCase , dtype=torch.floataa) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowercase__ = betas_for_alpha_bar(_UpperCAmelCase) else: raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''') lowercase__ = 1.0 - self.betas lowercase__ = torch.cumprod(self.alphas , dim=0) # set all values self.set_timesteps(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int]=None) -> str: """simple docstring""" if schedule_timesteps is None: lowercase__ = self.timesteps lowercase__ = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter) == 0: lowercase__ = 1 if len(_UpperCAmelCase) > 1 else 0 else: lowercase__ = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase) else timestep lowercase__ = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : str , ) -> Dict: """simple docstring""" lowercase__ = self.index_for_timestep(_UpperCAmelCase) if self.state_in_first_order: lowercase__ = self.sigmas[step_index] else: lowercase__ = self.sigmas_interpol[step_index] lowercase__ = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] = None , lowerCAmelCase : Optional[Any] = None , ) -> List[Any]: """simple docstring""" lowercase__ = num_inference_steps lowercase__ = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": lowercase__ = np.linspace(0 , num_train_timesteps - 1 , _UpperCAmelCase , dtype=_UpperCAmelCase)[::-1].copy() elif self.config.timestep_spacing == "leading": lowercase__ = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase__ = (np.arange(0 , _UpperCAmelCase) * step_ratio).round()[::-1].copy().astype(_UpperCAmelCase) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": lowercase__ = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase__ = (np.arange(_UpperCAmelCase , 0 , -step_ratio)).round().copy().astype(_UpperCAmelCase) timesteps -= 1 else: raise ValueError( f'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''') lowercase__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) lowercase__ = torch.from_numpy(np.log(_UpperCAmelCase)).to(_UpperCAmelCase) lowercase__ = np.interp(_UpperCAmelCase , np.arange(0 , len(_UpperCAmelCase)) , _UpperCAmelCase) lowercase__ = np.concatenate([sigmas, [0.0]]).astype(np.floataa) lowercase__ = torch.from_numpy(_UpperCAmelCase).to(device=_UpperCAmelCase) # interpolate sigmas lowercase__ = sigmas.log().lerp(sigmas.roll(1).log() , 0.5).exp() lowercase__ = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]]) lowercase__ = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]) if str(_UpperCAmelCase).startswith('mps'): # mps does not support float64 lowercase__ = torch.from_numpy(_UpperCAmelCase).to(_UpperCAmelCase , dtype=torch.floataa) else: lowercase__ = torch.from_numpy(_UpperCAmelCase).to(_UpperCAmelCase) # interpolate timesteps lowercase__ = self.sigma_to_t(_UpperCAmelCase).to(_UpperCAmelCase , dtype=timesteps.dtype) lowercase__ = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1).flatten() lowercase__ = torch.cat([timesteps[:1], interleaved_timesteps]) lowercase__ = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter lowercase__ = defaultdict(_UpperCAmelCase) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : Any) -> Optional[Any]: """simple docstring""" lowercase__ = sigma.log() # get distribution lowercase__ = log_sigma - self.log_sigmas[:, None] # get sigmas range lowercase__ = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2) lowercase__ = low_idx + 1 lowercase__ = self.log_sigmas[low_idx] lowercase__ = self.log_sigmas[high_idx] # interpolate sigmas lowercase__ = (low - log_sigma) / (low - high) lowercase__ = w.clamp(0 , 1) # transform interpolation to time range lowercase__ = (1 - w) * low_idx + w * high_idx lowercase__ = t.view(sigma.shape) return t @property def UpperCAmelCase ( self : Optional[Any]) -> Any: """simple docstring""" return self.sample is None def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] = True , ) -> int: """simple docstring""" lowercase__ = self.index_for_timestep(_UpperCAmelCase) # advance index counter by 1 lowercase__ = timestep.cpu().item() if torch.is_tensor(_UpperCAmelCase) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: lowercase__ = self.sigmas[step_index] lowercase__ = self.sigmas_interpol[step_index + 1] lowercase__ = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method lowercase__ = self.sigmas[step_index - 1] lowercase__ = self.sigmas_interpol[step_index] lowercase__ = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API lowercase__ = 0 lowercase__ = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": lowercase__ = sigma_hat if self.state_in_first_order else sigma_interpol lowercase__ = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": lowercase__ = sigma_hat if self.state_in_first_order else sigma_interpol lowercase__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample') else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''') if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order lowercase__ = (sample - pred_original_sample) / sigma_hat # 3. delta timestep lowercase__ = sigma_interpol - sigma_hat # store for 2nd order step lowercase__ = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order lowercase__ = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep lowercase__ = sigma_next - sigma_hat lowercase__ = self.sample lowercase__ = None lowercase__ = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_UpperCAmelCase) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : str , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , ) -> List[Any]: """simple docstring""" lowercase__ = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype) if original_samples.device.type == "mps" and torch.is_floating_point(_UpperCAmelCase): # mps does not support float64 lowercase__ = self.timesteps.to(original_samples.device , dtype=torch.floataa) lowercase__ = timesteps.to(original_samples.device , dtype=torch.floataa) else: lowercase__ = self.timesteps.to(original_samples.device) lowercase__ = timesteps.to(original_samples.device) lowercase__ = [self.index_for_timestep(_UpperCAmelCase , _UpperCAmelCase) for t in timesteps] lowercase__ = sigmas[step_indices].flatten() while len(sigma.shape) < len(original_samples.shape): lowercase__ = sigma.unsqueeze(-1) lowercase__ = original_samples + noise * sigma return noisy_samples def __len__( self : List[str]) -> Tuple: """simple docstring""" return self.config.num_train_timesteps
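# --- Illustrative aside (not part of the dataset row above) ---
# A minimal NumPy restatement of the log-sigma interpolation that the
# sigma_to_t method above performs: a query sigma is mapped to a
# fractional timestep by linear interpolation between neighbouring
# entries of an ascending log-sigma table. Function and variable names
# here are hypothetical, not part of the scheduler API.
import numpy as np

def sigma_to_fractional_t(sigma, log_sigmas):
    log_sigma = np.log(sigma)
    # last table index whose log-sigma does not exceed the query
    low_idx = int(np.searchsorted(log_sigmas, log_sigma, side="right")) - 1
    low_idx = min(max(low_idx, 0), len(log_sigmas) - 2)
    high_idx = low_idx + 1
    low, high = log_sigmas[low_idx], log_sigmas[high_idx]
    w = np.clip((low - log_sigma) / (low - high), 0, 1)  # interpolation weight
    return (1 - w) * low_idx + w * high_idx

toy_table = np.log(np.linspace(0.1, 10.0, 50))  # toy ascending sigma table
print(sigma_to_fractional_t(1.0, toy_table))  # a fractional index near 4.5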
713
def _lowerCAmelCase(principal, rate_per_annum, years_to_repay):
    if principal <= 0:
        raise Exception('Principal borrowed must be > 0')
    if rate_per_annum < 0:
        raise Exception('Rate of interest must be >= 0')
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception('Years to repay must be an integer > 0')
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
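# --- Illustrative aside (not part of the dataset row above) ---
# A quick numeric check of the amortization formula above: a 25_000
# loan at 12% per annum over 3 years should cost roughly 830.36 per
# month (numbers chosen for illustration only).
rate = 0.12 / 12
n = 3 * 12
print(round(25_000 * rate * (1 + rate) ** n / ((1 + rate) ** n - 1), 2))  # ~830.36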
642
0
import tensorflow as tf from ...tf_utils import shape_list class UpperCAmelCase__( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]=1 , lowerCAmelCase : Any=False , **lowerCAmelCase : Optional[int]) -> Any: """simple docstring""" super().__init__(**_A) lowercase__ = vocab_size lowercase__ = d_embed lowercase__ = d_proj lowercase__ = cutoffs + [vocab_size] lowercase__ = [0] + self.cutoffs lowercase__ = div_val lowercase__ = self.cutoffs[0] lowercase__ = len(self.cutoffs) - 1 lowercase__ = self.shortlist_size + self.n_clusters lowercase__ = keep_order lowercase__ = [] lowercase__ = [] def UpperCAmelCase ( self : List[str] , lowerCAmelCase : List[str]) -> List[str]: """simple docstring""" if self.n_clusters > 0: lowercase__ = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=_A , name='cluster_weight') lowercase__ = self.add_weight( shape=(self.n_clusters,) , initializer='zeros' , trainable=_A , name='cluster_bias') if self.div_val == 1: for i in range(len(self.cutoffs)): if self.d_proj != self.d_embed: lowercase__ = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=_A , name=f'''out_projs_._{i}''' , ) self.out_projs.append(_A) else: self.out_projs.append(_A) lowercase__ = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=_A , name=f'''out_layers_._{i}_._weight''' , ) lowercase__ = self.add_weight( shape=(self.vocab_size,) , initializer='zeros' , trainable=_A , name=f'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias)) else: for i in range(len(self.cutoffs)): lowercase__, lowercase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase__ = self.d_embed // (self.div_val**i) lowercase__ = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=_A , name=f'''out_projs_._{i}''') self.out_projs.append(_A) lowercase__ = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=_A , name=f'''out_layers_._{i}_._weight''' , ) lowercase__ = self.add_weight( shape=(r_idx - l_idx,) , initializer='zeros' , trainable=_A , name=f'''out_layers_._{i}_._bias''' , ) self.out_layers.append((weight, bias)) super().build(_A) @staticmethod def UpperCAmelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict=None) -> int: """simple docstring""" lowercase__ = x if proj is not None: lowercase__ = tf.einsum('ibd,ed->ibe' , _A , _A) return tf.einsum('ibd,nd->ibn' , _A , _A) + b @staticmethod def UpperCAmelCase ( lowerCAmelCase : str , lowerCAmelCase : Dict) -> Union[str, Any]: """simple docstring""" lowercase__ = shape_list(_A) lowercase__ = tf.range(lp_size[0] , dtype=target.dtype) lowercase__ = tf.stack([r, target] , 1) return tf.gather_nd(_A , _A) def UpperCAmelCase ( self : int , lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] , lowerCAmelCase : int=True , lowerCAmelCase : Dict=False) -> int: """simple docstring""" lowercase__ = 0 if self.n_clusters == 0: lowercase__ = self._logit(_A , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0]) if target is not None: lowercase__ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_A , logits=_A) lowercase__ = tf.nn.log_softmax(_A , axis=-1) else: lowercase__ = shape_list(_A) lowercase__ = [] lowercase__ = tf.zeros(hidden_sizes[:2]) for i in 
range(len(self.cutoffs)): lowercase__, lowercase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: lowercase__ = (target >= l_idx) & (target < r_idx) lowercase__ = tf.where(_A) lowercase__ = tf.boolean_mask(_A , _A) - l_idx if self.div_val == 1: lowercase__ = self.out_layers[0][0][l_idx:r_idx] lowercase__ = self.out_layers[0][1][l_idx:r_idx] else: lowercase__ = self.out_layers[i][0] lowercase__ = self.out_layers[i][1] if i == 0: lowercase__ = tf.concat([cur_W, self.cluster_weight] , 0) lowercase__ = tf.concat([cur_b, self.cluster_bias] , 0) lowercase__ = self._logit(_A , _A , _A , self.out_projs[0]) lowercase__ = tf.nn.log_softmax(_A) out.append(head_logprob[..., : self.cutoffs[0]]) if target is not None: lowercase__ = tf.boolean_mask(_A , _A) lowercase__ = self._gather_logprob(_A , _A) else: lowercase__ = self._logit(_A , _A , _A , self.out_projs[i]) lowercase__ = tf.nn.log_softmax(_A) lowercase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster lowercase__ = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(_A) if target is not None: lowercase__ = tf.boolean_mask(_A , _A) lowercase__ = tf.boolean_mask(_A , _A) lowercase__ = self._gather_logprob(_A , _A) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(_A , -cur_logprob , shape_list(_A)) lowercase__ = tf.concat(_A , axis=-1) if target is not None: if return_mean: lowercase__ = tf.reduce_mean(_A) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(_A) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(_A , name=self.name , aggregation='mean' if return_mean else '') return out
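# --- Illustrative aside (not part of the dataset row above) ---
# What the _gather_logprob helper above does, in isolation: pick each
# row's target log-probability from an [n, vocab] table with
# tf.gather_nd. Toy values; the shapes are the only assumption.
import tensorflow as tf

logprob = tf.math.log(tf.constant([[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]]))  # [n=2, vocab=3]
target = tf.constant([0, 1])
idx = tf.stack([tf.range(tf.shape(logprob)[0]), target], axis=1)  # [[0, 0], [1, 1]]
print(tf.gather_nd(logprob, idx))  # log(0.7), log(0.8)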
714
from __future__ import annotations


def extended_euclid(a, b):
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1, r1, n2, r2):
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a, n):
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1, r1, n2, r2):
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
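# --- Illustrative aside (not part of the dataset row above) ---
# Usage check for the restored helpers above: the x with
# x ≡ 1 (mod 5) and x ≡ 3 (mod 7) is 31, and both CRT variants agree.
print(chinese_remainder_theorem(5, 1, 7, 3))   # 31
print(chinese_remainder_theorem2(5, 1, 7, 3))  # 31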
642
0
def reverse_long_words(sentence):
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
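# --- Illustrative aside (not part of the dataset row above) ---
# Expected behaviour of the helper above: words longer than four
# characters are reversed, shorter ones pass through.
assert " ".join(w[::-1] if len(w) > 4 else w for w in "Hey wollef sroirraw".split()) == "Hey fellow warriors"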
715
from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[Any] = { "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Union[str, Any] = "umt5" A : List[str] = ["past_key_values"] def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str: """simple docstring""" super().__init__( is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , ) lowercase__ = vocab_size lowercase__ = d_model lowercase__ = d_kv lowercase__ = d_ff lowercase__ = num_layers lowercase__ = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry lowercase__ = num_heads lowercase__ = relative_attention_num_buckets lowercase__ = relative_attention_max_distance lowercase__ = dropout_rate lowercase__ = layer_norm_epsilon lowercase__ = initializer_factor lowercase__ = feed_forward_proj lowercase__ = use_cache lowercase__ = self.feed_forward_proj.split('-') lowercase__ = act_info[-1] lowercase__ = act_info[0] == 'gated' if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
' '\'gated-gelu\' or \'relu\'') if feed_forward_proj == "gated-gelu": lowercase__ = 'gelu_new' @property def UpperCAmelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" return self.d_model @property def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" return self.num_heads @property def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return self.num_layers class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]: """simple docstring""" lowercase__ = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: lowercase__ = 'past_encoder_sequence + sequence' lowercase__ = {0: 'batch'} lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: lowercase__ = {0: 'batch', 1: 'decoder_sequence'} lowercase__ = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs') return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def UpperCAmelCase ( self : int) -> int: """simple docstring""" return 13 @property def UpperCAmelCase ( self : Optional[Any]) -> float: """simple docstring""" return 5E-4
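# --- Illustrative aside (not part of the dataset row above) ---
# The activation parsing done in the constructor above, in isolation:
# feed_forward_proj splits into an optional "gated" prefix plus the
# dense activation name, and the legacy "gated-gelu" maps to gelu_new.
def parse_feed_forward_proj(feed_forward_proj):
    act_info = feed_forward_proj.split('-')
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == 'gated'
    if feed_forward_proj == 'gated-gelu':
        dense_act_fn = 'gelu_new'
    return dense_act_fn, is_gated_act

print(parse_feed_forward_proj('gated-gelu'))  # ('gelu_new', True)
print(parse_feed_forward_proj('relu'))        # ('relu', False)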
642
0
import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : Any = LongformerTokenizer A : int = True A : Optional[int] = LongformerTokenizerFast A : Optional[Any] = True def UpperCAmelCase ( self : List[str]) -> int: """simple docstring""" super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase__ = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] lowercase__ = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase)))) lowercase__ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] lowercase__ = {'unk_token': '<unk>'} lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file']) lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file']) with open(self.vocab_file , 'w' , encoding='utf-8') as fp: fp.write(json.dumps(lowerCAmelCase) + '\n') with open(self.merges_file , 'w' , encoding='utf-8') as fp: fp.write('\n'.join(lowerCAmelCase)) def UpperCAmelCase ( self : List[Any] , **lowerCAmelCase : List[Any]) -> Any: """simple docstring""" kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase) def UpperCAmelCase ( self : Tuple , **lowerCAmelCase : Optional[Any]) -> Union[str, Any]: """simple docstring""" kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any] , lowerCAmelCase : Dict) -> int: """simple docstring""" lowercase__ = 'lower newer' lowercase__ = 'lower newer' return input_text, output_text def UpperCAmelCase ( self : Dict) -> Union[str, Any]: """simple docstring""" lowercase__ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map) lowercase__ = 'lower newer' lowercase__ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er'] lowercase__ = tokenizer.tokenize(lowerCAmelCase) # , add_prefix_space=True) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokens + [tokenizer.unk_token] lowercase__ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase) , lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> Dict: """simple docstring""" lowercase__ = self.get_tokenizer() self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowerCAmelCase) , [0, 3_14_14, 2_32, 3_28, 2]) self.assertListEqual( tokenizer.encode('Hello world! 
cécé herlolip 418' , add_special_tokens=lowerCAmelCase) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , ) @slow def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" lowercase__ = self.tokenizer_class.from_pretrained('allenai/longformer-base-4096') lowercase__ = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase) lowercase__ = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase) lowercase__ = tokenizer.encode( 'sequence builders' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase) lowercase__ = tokenizer.encode( 'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase) lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase) lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def UpperCAmelCase ( self : List[str]) -> Any: """simple docstring""" lowercase__ = self.get_tokenizer() lowercase__ = 'Encode this sequence.' lowercase__ = tokenizer.byte_encoder[' '.encode('utf-8')[0]] # Testing encoder arguments lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertNotEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase , add_prefix_space=lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertEqual(lowerCAmelCase , lowerCAmelCase) tokenizer.add_special_tokens({'bos_token': '<s>'}) lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(encoded[1])[0] self.assertNotEqual(lowerCAmelCase , lowerCAmelCase) # Testing spaces after special tokens lowercase__ = '<mask>' tokenizer.add_special_tokens( {'mask_token': AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase)}) # mask token has a left space lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase) lowercase__ = 'Encode <mask> sequence' lowercase__ = 'Encode <mask>sequence' lowercase__ = tokenizer.encode(lowerCAmelCase) lowercase__ = encoded.index(lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.encode(lowerCAmelCase) lowercase__ = encoded.index(lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertNotEqual(lowerCAmelCase , lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> Any: """simple docstring""" pass def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''): lowercase__ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase) lowercase__ = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase) lowercase__ = 'A, <mask> AllenNLP sentence.' 
lowercase__ = tokenizer_r.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase) lowercase__ = tokenizer_p.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r['token_type_ids']) , sum(tokens_p['token_type_ids'])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) , sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) , ) lowercase__ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids']) lowercase__ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids']) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2]) self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2]) self.assertSequenceEqual( lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']) self.assertSequenceEqual( lowerCAmelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']) def UpperCAmelCase ( self : List[Any]) -> List[str]: """simple docstring""" for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2): lowercase__ = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase) lowercase__ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__()) lowercase__ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__()) self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowerCAmelCase) self.assertEqual(post_processor_state['add_prefix_space'] , lowerCAmelCase) self.assertEqual(post_processor_state['trim_offsets'] , lowerCAmelCase) def UpperCAmelCase ( self : Tuple) -> Optional[int]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''): lowercase__ = 'hello' # `hello` is a token in the vocabulary of `pretrained_name` lowercase__ = f'''{text_of_1_token} {text_of_1_token}''' lowercase__ = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase) lowercase__ = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase))) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase) + 1, len(lowerCAmelCase) + 1 + len(lowerCAmelCase)) , ) lowercase__ = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase) lowercase__ = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase))) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase) + 1, len(lowerCAmelCase) + 1 + len(lowerCAmelCase)) , ) lowercase__ = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase) lowercase__ = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , 
add_special_tokens=lowerCAmelCase) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase))) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase), len(lowerCAmelCase) + 1 + len(lowerCAmelCase)) , ) lowercase__ = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase) lowercase__ = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase))) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase), len(lowerCAmelCase) + 1 + len(lowerCAmelCase)) , ) lowercase__ = f''' {text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) lowercase__ = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase) lowercase__ = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase))) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase) + 1, 1 + len(lowerCAmelCase) + 1 + len(lowerCAmelCase)) , ) lowercase__ = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase) lowercase__ = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase))) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase), 1 + len(lowerCAmelCase) + 1 + len(lowerCAmelCase)) , ) lowercase__ = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase , use_fast=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase) lowercase__ = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase))) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase), 1 + len(lowerCAmelCase) + 1 + len(lowerCAmelCase)) , )
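# --- Illustrative aside (not part of the dataset row above) ---
# The offset arithmetic the assertions above encode, stated directly
# for the two-token string "hello hello": with trim_offsets=True the
# separating space is excluded from the second span, with
# trim_offsets=False it is included.
token = 'hello'
text = f'{token} {token}'
print([(0, len(token)), (len(token) + 1, len(text))])  # trimmed:   [(0, 5), (6, 11)]
print([(0, len(token)), (len(token), len(text))])      # untrimmed: [(0, 5), (5, 11)]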
716
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin a__ : Any = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : str = XGLMTokenizer A : List[Any] = XGLMTokenizerFast A : int = True A : Optional[Any] = True def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase) tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = '<pad>' lowercase__ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase) def UpperCAmelCase ( self : str) -> List[str]: """simple docstring""" lowercase__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<s>') self.assertEqual(vocab_keys[1] , '<pad>') self.assertEqual(len(lowerCAmelCase) , 10_08) def UpperCAmelCase ( self : List[str]) -> str: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 10_08) def UpperCAmelCase ( self : Optional[Any]) -> List[str]: """simple docstring""" lowercase__ = XGLMTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase) lowercase__ = tokenizer.tokenize('This is a test') self.assertListEqual(lowerCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , ) lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowercase__ = tokenizer.convert_tokens_to_ids(lowerCAmelCase) self.assertListEqual( lowerCAmelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase) self.assertListEqual( lowerCAmelCase , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" return XGLMTokenizer.from_pretrained('facebook/xglm-564M') def UpperCAmelCase ( self : Optional[int]) -> Dict: """simple docstring""" with tempfile.NamedTemporaryFile() as f: shutil.copyfile(lowerCAmelCase , f.name) lowercase__ = XGLMTokenizer(f.name , keep_accents=lowerCAmelCase) lowercase__ = pickle.dumps(lowerCAmelCase) pickle.loads(lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> str: 
"""simple docstring""" if not self.test_rust_tokenizer: return lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = 'I was born in 92000, and this is falsé.' lowercase__ = tokenizer.tokenize(lowerCAmelCase) lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) @slow def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" lowercase__ = 'Hello World!' lowercase__ = [2, 3_12_27, 44_47, 35] self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase)) @slow def UpperCAmelCase ( self : List[str]) -> Union[str, Any]: """simple docstring""" lowercase__ = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth' ) # fmt: off lowercase__ = [2, 10_18, 67, 11, 19_88, 26_17, 56_31, 2_78, 11, 34_07, 48, 7_16_30, 2_80_85, 4, 32_34, 1_57, 13, 6, 5, 6, 4, 35_26, 7_68, 15, 6_59, 57, 2_98, 39_83, 8_64, 1_29, 21, 6, 5, 1_36_75, 3_77, 6_52, 75_80, 1_03_41, 1_55, 28_17, 4_22, 16_66, 7, 16_74, 53, 1_13, 20_22_77, 1_78_92, 33, 60, 87, 4, 32_34, 1_57, 61, 26_67, 5_23_76, 19, 88, 23, 7_35] # fmt: on self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase)) @slow def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = { 'input_ids': [[2, 10_88_25, 11_63, 15, 8_80_10, 4_73, 1_58_98, 1_57, 1_36_72, 18_57, 3_12, 8, 23_80_21, 11_63, 53, 1_36_72, 18_57, 3_12, 8, 5_32_83, 18_23_96, 8, 1_85_66, 16, 3_67_33, 41_01, 8, 2_30, 24_40_17, 12_25_53, 7, 15, 13_25_97, 4, 2_93, 1_25_11, 76_10, 4, 34_14, 13_25_97, 9, 4, 3_23_61, 3_62, 4, 7_34, 2_85_12, 3_25_69, 18, 4, 3_23_61, 2_60_96, 1_49_82, 73, 1_87_15, 2_14_33, 23_52_61, 15, 4_92, 1_24_27, 16, 53, 1_87_15, 2_14_33, 6_54_54, 15, 2_36_59, 5_63, 16, 2_78, 5_97, 28_43, 5_95, 79_31, 18_23_96, 6_41_86, 22, 8_86, 5_95, 13_29_81, 53, 2_55_40, 34_49, 4_39_82, 3_99_01, 59_51, 8_78, 3_30, 4, 2_76_94, 8_02_69, 3_12, 53, 65_17, 1_17_80, 6_11, 2_04_08, 5], [2, 6, 13_25_97, 67, 4_28_97, 33, 5_92, 8, 16_37_29, 2_55_40, 3_61, 13_69_97, 10_95_14, 17_32_30, 7, 5_01, 60, 10_29_13, 1_96, 56_31, 2_35, 6_32_43, 4_73, 6, 23_17_57, 74, 52_77, 79_05, 53, 30_95, 3_73_17, 22, 4_54, 18_38_74, 5], [2, 2_68, 3_12_98, 4_65_30, 6, 13_29_35, 4_38_31, 7, 5_97, 32, 24, 36_88, 98_65, 5]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase , model_name='facebook/xglm-564M' , padding=lowerCAmelCase , )
642
0
from graphs.minimum_spanning_tree_kruskal import kruskal


def _lowerCAmelCase():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
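# --- Illustrative aside (not part of the dataset row above) ---
# The test imports kruskal from elsewhere; below is a hedged union-find
# sketch matching the signature the test implies, kruskal(num_nodes,
# edges) with [u, v, weight] triples in and out. It is a stand-in, not
# the imported implementation.
def kruskal_sketch(num_nodes, edges):
    parent = list(range(num_nodes))

    def find(u):
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving
            u = parent[u]
        return u

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # adding the edge creates no cycle
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst

print(kruskal_sketch(4, [[0, 1, 3], [1, 2, 5], [2, 3, 1], [0, 2, 1]]))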
717
import argparse import hashlib # hashlib is only used inside the Test class import struct class UpperCAmelCase__: '''simple docstring''' def __init__( self : Optional[Any] , lowerCAmelCase : str) -> Optional[int]: """simple docstring""" lowercase__ = data lowercase__ = [0X6_7_4_5_2_3_0_1, 0XE_F_C_D_A_B_8_9, 0X9_8_B_A_D_C_F_E, 0X1_0_3_2_5_4_7_6, 0XC_3_D_2_E_1_F_0] @staticmethod def UpperCAmelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[int]) -> str: """simple docstring""" return ((n << b) | (n >> (32 - b))) & 0XF_F_F_F_F_F_F_F def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" lowercase__ = B'\x80' + B'\x00' * (63 - (len(self.data) + 8) % 64) lowercase__ = self.data + padding + struct.pack('>Q' , 8 * len(self.data)) return padded_data def UpperCAmelCase ( self : int) -> Tuple: """simple docstring""" return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data) , 64) ] def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> List[Any]: """simple docstring""" lowercase__ = list(struct.unpack('>16L' , lowerCAmelCase)) + [0] * 64 for i in range(16 , 80): lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1) return w def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = self.padding() lowercase__ = self.split_blocks() for block in self.blocks: lowercase__ = self.expand_block(lowerCAmelCase) lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = self.h for i in range(0 , 80): if 0 <= i < 20: lowercase__ = (b & c) | ((~b) & d) lowercase__ = 0X5_A_8_2_7_9_9_9 elif 20 <= i < 40: lowercase__ = b ^ c ^ d lowercase__ = 0X6_E_D_9_E_B_A_1 elif 40 <= i < 60: lowercase__ = (b & c) | (b & d) | (c & d) lowercase__ = 0X8_F_1_B_B_C_D_C elif 60 <= i < 80: lowercase__ = b ^ c ^ d lowercase__ = 0XC_A_6_2_C_1_D_6 lowercase__, lowercase__, lowercase__, lowercase__, lowercase__ = ( self.rotate(lowerCAmelCase , 5) + f + e + k + expanded_block[i] & 0XF_F_F_F_F_F_F_F, a, self.rotate(lowerCAmelCase , 30), c, d, ) lowercase__ = ( self.h[0] + a & 0XF_F_F_F_F_F_F_F, self.h[1] + b & 0XF_F_F_F_F_F_F_F, self.h[2] + c & 0XF_F_F_F_F_F_F_F, self.h[3] + d & 0XF_F_F_F_F_F_F_F, self.h[4] + e & 0XF_F_F_F_F_F_F_F, ) return ("{:08x}" * 5).format(*self.h) def _lowerCAmelCase ( ): lowercase__ = B'Test String' assert SHAaHash(A__ ).final_hash() == hashlib.shaa(A__ ).hexdigest() # noqa: S324 def _lowerCAmelCase ( ): lowercase__ = argparse.ArgumentParser(description='Process some strings or files' ) parser.add_argument( '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' ) lowercase__ = parser.parse_args() lowercase__ = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: lowercase__ = f.read() else: lowercase__ = bytes(A__ , 'utf-8' ) print(SHAaHash(A__ ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
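# --- Illustrative aside (not part of the dataset row above) ---
# The row's own test compares the class against the standard library;
# the reference digest can be produced directly:
import hashlib

print(hashlib.sha1(b'Test String').hexdigest())  # reference SHA-1 digest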
642
0
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def _lowerCAmelCase ( A__ ): return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) a__ : List[Any] = "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n" class UpperCAmelCase__( __lowerCAmelCase ): '''simple docstring''' @staticmethod def UpperCAmelCase ( lowerCAmelCase : ArgumentParser) -> Optional[int]: """simple docstring""" lowercase__ = parser.add_parser( 'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , ) train_parser.add_argument('--model_type' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='Model\'s type.') train_parser.add_argument( '--tf_checkpoint' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='TensorFlow checkpoint path or folder.') train_parser.add_argument( '--pytorch_dump_output' , type=lowerCamelCase__ , required=lowerCamelCase__ , help='Path to the PyTorch saved model output.') train_parser.add_argument('--config' , type=lowerCamelCase__ , default='' , help='Configuration file path or folder.') train_parser.add_argument( '--finetuning_task_name' , type=lowerCamelCase__ , default=lowerCamelCase__ , help='Optional fine-tuning task name if the TF model was a finetuned model.' , ) train_parser.set_defaults(func=lowerCamelCase__) def __init__( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : str , *lowerCAmelCase : Optional[int] , ) -> List[Any]: """simple docstring""" lowercase__ = logging.get_logger('transformers-cli/converting') self._logger.info(f'''Loading model {model_type}''') lowercase__ = model_type lowercase__ = tf_checkpoint lowercase__ = pytorch_dump_output lowercase__ = config lowercase__ = finetuning_task_name def UpperCAmelCase ( self : Optional[Any]) -> Optional[Any]: """simple docstring""" if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(lowerCamelCase__) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) 
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__) if "ckpt" in self._tf_checkpoint.lower(): lowercase__ = self._tf_checkpoint lowercase__ = '''''' else: lowercase__ = self._tf_checkpoint lowercase__ = '''''' convert_transfo_xl_checkpoint_to_pytorch( lowerCamelCase__ , self._config , self._pytorch_dump_output , lowerCamelCase__) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCamelCase__) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output) else: raise ValueError( '--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]')
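# --- Illustrative aside (not part of the dataset row above) ---
# Typical command-line use of the converter defined above, matching its
# argparse flags exactly (the file paths are hypothetical placeholders):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin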
718
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer a__ : List[Any] = logging.get_logger(__name__) a__ : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart a__ : List[Any] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } a__ : int = { "facebook/bart-base": 10_24, "facebook/bart-large": 10_24, "facebook/bart-large-mnli": 10_24, "facebook/bart-large-cnn": 10_24, "facebook/bart-large-xsum": 10_24, "yjernite/bart_eli5": 10_24, } class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : Optional[Any] = VOCAB_FILES_NAMES A : Dict = PRETRAINED_VOCAB_FILES_MAP A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A : int = ["input_ids", "attention_mask"] A : Any = BartTokenizer def __init__( self : List[Any] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : str="replace" , lowerCAmelCase : str="<s>" , lowerCAmelCase : int="</s>" , lowerCAmelCase : Optional[int]="</s>" , lowerCAmelCase : Union[str, Any]="<s>" , lowerCAmelCase : str="<unk>" , lowerCAmelCase : int="<pad>" , lowerCAmelCase : int="<mask>" , lowerCAmelCase : Dict=False , lowerCAmelCase : List[Any]=True , **lowerCAmelCase : Optional[Any] , ) -> Optional[Any]: """simple docstring""" super().__init__( lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , errors=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase 
, sep_token=lowerCAmelCase , cls_token=lowerCAmelCase , unk_token=lowerCAmelCase , pad_token=lowerCAmelCase , mask_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , trim_offsets=lowerCAmelCase , **lowerCAmelCase , ) lowercase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space: lowercase__ = getattr(lowerCAmelCase , pre_tok_state.pop('type')) lowercase__ = add_prefix_space lowercase__ = pre_tok_class(**lowerCAmelCase) lowercase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase__ = 'post_processor' lowercase__ = getattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase) if tokenizer_component_instance: lowercase__ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ = tuple(state['sep']) if "cls" in state: lowercase__ = tuple(state['cls']) lowercase__ = False if state.get('add_prefix_space' , lowerCAmelCase) != add_prefix_space: lowercase__ = add_prefix_space lowercase__ = True if state.get('trim_offsets' , lowerCAmelCase) != trim_offsets: lowercase__ = trim_offsets lowercase__ = True if changes_to_apply: lowercase__ = getattr(lowerCAmelCase , state.pop('type')) lowercase__ = component_class(**lowerCAmelCase) setattr(self.backend_tokenizer , lowerCAmelCase , lowerCAmelCase) @property def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def UpperCAmelCase ( self : Tuple , lowerCAmelCase : int) -> Optional[int]: """simple docstring""" lowercase__ = AddedToken(lowerCAmelCase , lstrip=lowerCAmelCase , rstrip=lowerCAmelCase) if isinstance(lowerCAmelCase , lowerCAmelCase) else value lowercase__ = value def UpperCAmelCase ( self : List[str] , *lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[int]) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase , **lowerCAmelCase) def UpperCAmelCase ( self : str , *lowerCAmelCase : Tuple , **lowerCAmelCase : str) -> BatchEncoding: """simple docstring""" lowercase__ = kwargs.get('is_split_into_words' , lowerCAmelCase) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase , **lowerCAmelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None) -> Tuple[str]: """simple docstring""" lowercase__ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase) return tuple(lowerCAmelCase) def UpperCAmelCase ( self : Any , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=None) -> Tuple: """simple docstring""" lowercase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : List[int] , 
lowerCAmelCase : Optional[List[int]] = None) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
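# --- Illustrative aside (not part of the dataset row above) ---
# The two helpers at the end of the class above encode BART's sequence
# layout: <s> A </s> for one sequence, <s> A </s></s> B </s> for a
# pair, with all-zero token_type_ids. Restated with BART's real special
# ids (bos=0, eos=2) and hypothetical content ids 10..13:
bos, eos = [0], [2]
ids_a, ids_b = [10, 11], [12, 13]
single = bos + ids_a + eos
pair = single + eos + ids_b + eos
print(single)           # [0, 10, 11, 2]
print(pair)             # [0, 10, 11, 2, 2, 12, 13, 2]
print([0] * len(pair))  # token_type_ids: all zeros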
642
0
import colorsys

from PIL import Image  # type: ignore


def get_distance(x, y, max_step):
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width=800,
    image_height=600,
    figure_center_x=-0.6,
    figure_center_y=0,
    figure_width=3.2,
    max_step=50,
    use_distance_color_coding=True,
):
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # colored version, full figure
    img = get_image()
    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)
    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)
    # uncomment to save the image
    # img.save("mandelbrot.png")
    img.show()
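# --- Illustrative aside (not part of the dataset row above) ---
# Spot checks for the restored get_distance above: the origin never
# escapes (distance 1.0) while c = 2 + 0j escapes at step 0.
print(get_distance(0.0, 0.0, 50))  # 1.0
print(get_distance(2.0, 0.0, 50))  # 0.0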
719
import torch from diffusers import DDIMParallelScheduler from .test_schedulers import SchedulerCommonTest class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' A : str = (DDIMParallelScheduler,) A : Any = (("eta", 0.0), ("num_inference_steps", 50)) def UpperCAmelCase ( self : Union[str, Any] , **lowerCAmelCase : Optional[int]) -> Dict: """simple docstring""" lowercase__ = { 'num_train_timesteps': 10_00, 'beta_start': 0.00_01, 'beta_end': 0.02, 'beta_schedule': 'linear', 'clip_sample': True, } config.update(**lowerCAmelCase) return config def UpperCAmelCase ( self : int , **lowerCAmelCase : str) -> Union[str, Any]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(**lowerCAmelCase) lowercase__ = scheduler_class(**lowerCAmelCase) lowercase__, lowercase__ = 10, 0.0 lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase) for t in scheduler.timesteps: lowercase__ = model(lowerCAmelCase , lowerCAmelCase) lowercase__ = scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase).prev_sample return sample def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" for timesteps in [1_00, 5_00, 10_00]: self.check_over_configs(num_train_timesteps=lowerCAmelCase) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowerCAmelCase) lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config(steps_offset=1) lowercase__ = scheduler_class(**lowerCAmelCase) scheduler.set_timesteps(5) assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1])) def UpperCAmelCase ( self : str) -> Tuple: """simple docstring""" for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]): self.check_over_configs(beta_start=lowerCAmelCase , beta_end=lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> str: """simple docstring""" for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> List[str]: """simple docstring""" for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" for clip_sample in [True, False]: self.check_over_configs(clip_sample=lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> int: """simple docstring""" for timestep_spacing in ["trailing", "leading"]: self.check_over_configs(timestep_spacing=lowerCAmelCase) def UpperCAmelCase ( self : Any) -> List[str]: """simple docstring""" for rescale_betas_zero_snr in [True, False]: self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" self.check_over_configs(thresholding=lowerCAmelCase) for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs( thresholding=lowerCAmelCase , prediction_type=lowerCAmelCase , sample_max_value=lowerCAmelCase , ) def UpperCAmelCase ( self : int) -> Optional[Any]: """simple docstring""" for t in [1, 10, 49]: self.check_over_forward(time_step=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> int: """simple docstring""" for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00]): self.check_over_forward(time_step=lowerCAmelCase , num_inference_steps=lowerCAmelCase) def 
UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]): self.check_over_forward(time_step=lowerCAmelCase , eta=lowerCAmelCase) def UpperCAmelCase ( self : Union[str, Any]) -> List[Any]: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCAmelCase) assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00) - 0.1_47_71)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60) - 0.3_24_60)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86) - 0.0_09_79)) < 1E-5 assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98) - 0.02)) < 1E-5 def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" lowercase__ = self.scheduler_classes[0] lowercase__ = self.get_scheduler_config() lowercase__ = scheduler_class(**lowerCAmelCase) lowercase__, lowercase__ = 10, 0.0 scheduler.set_timesteps(lowerCAmelCase) lowercase__ = self.dummy_model() lowercase__ = self.dummy_sample_deter lowercase__ = self.dummy_sample_deter + 0.1 lowercase__ = self.dummy_sample_deter - 0.1 lowercase__ = samplea.shape[0] lowercase__ = torch.stack([samplea, samplea, samplea] , dim=0) lowercase__ = torch.arange(lowerCAmelCase)[0:3, None].repeat(1 , lowerCAmelCase) lowercase__ = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1)) lowercase__ = scheduler.batch_step_no_noise(lowerCAmelCase , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , lowerCAmelCase) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 11_47.79_04) < 1E-2 assert abs(result_mean.item() - 0.49_82) < 1E-3 def UpperCAmelCase ( self : Any) -> int: """simple docstring""" lowercase__ = self.full_loop() lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_72.00_67) < 1E-2 assert abs(result_mean.item() - 0.22_39_67) < 1E-3 def UpperCAmelCase ( self : int) -> List[Any]: """simple docstring""" lowercase__ = self.full_loop(prediction_type='v_prediction') lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 52.53_02) < 1E-2 assert abs(result_mean.item() - 0.06_84) < 1E-3 def UpperCAmelCase ( self : str) -> Dict: """simple docstring""" lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_49.82_95) < 1E-2 assert abs(result_mean.item() - 0.19_51) < 1E-3 def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" lowercase__ = self.full_loop(set_alpha_to_one=lowerCAmelCase , beta_start=0.01) lowercase__ = torch.sum(torch.abs(lowerCAmelCase)) lowercase__ = torch.mean(torch.abs(lowerCAmelCase)) assert abs(result_sum.item() - 1_49.07_84) < 1E-2 assert abs(result_mean.item() - 0.19_41) < 1E-3
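# --- Illustrative aside (not part of the dataset row above) ---
# The _get_variance values asserted above follow the DDIM posterior
# variance; a NumPy restatement with the same linear beta schedule as
# the test config (1e-4 to 0.02 over 1000 steps). Exact agreement with
# the scheduler is assumed here, not re-verified.
import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alphas_cumprod = np.cumprod(1.0 - betas)

def ddim_variance(t, prev_t):
    a_t = alphas_cumprod[t]
    a_prev = alphas_cumprod[prev_t] if prev_t >= 0 else 1.0
    return (1 - a_prev) / (1 - a_t) * (1 - a_t / a_prev)

print(ddim_variance(420, 400))  # ~0.14771 per the test above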
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a__ : Dict = get_tests_dir("fixtures/spiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase__( lowercase_ , unittest.TestCase ): '''simple docstring''' A : str = DebertaVaTokenizer A : Tuple = DebertaVaTokenizerFast A : Tuple = True A : Optional[Any] = True def UpperCAmelCase ( self : int) -> int: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing lowercase__ = DebertaVaTokenizer(lowerCAmelCase , unk_token='<unk>') tokenizer.save_pretrained(self.tmpdirname) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]) -> str: """simple docstring""" lowercase__ = 'this is a test' lowercase__ = 'this is a test' return input_text, output_text def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" lowercase__ = '<pad>' lowercase__ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase) , lowerCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase) , lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> int: """simple docstring""" lowercase__ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , '<pad>') self.assertEqual(vocab_keys[1] , '<unk>') self.assertEqual(vocab_keys[-1] , '[PAD]') self.assertEqual(len(lowerCAmelCase) , 3_00_01) def UpperCAmelCase ( self : int) -> List[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00) def UpperCAmelCase ( self : str) -> int: """simple docstring""" lowercase__ = ' \tHeLLo!how \n Are yoU? ' lowercase__ = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on lowercase__ = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def UpperCAmelCase ( self : int) -> Union[str, Any]: """simple docstring""" pass def UpperCAmelCase ( self : Tuple) -> int: """simple docstring""" lowercase__ = 'I was born in 92000, and this is falsé.' 
lowercase__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on lowercase__ = DebertaVaTokenizer(lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = DebertaVaTokenizerFast(lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> List[str]: """simple docstring""" lowercase__ = 'I was born in 92000, and this is falsé.' lowercase__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on lowercase__ = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" lowercase__ = 'I was born in 92000, and this is falsé.' lowercase__ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on lowercase__ = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> str: """simple docstring""" lowercase__ = 'I was born in 92000, and this is falsé.' lowercase__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on lowercase__ = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[Any]: """simple docstring""" lowercase__ = ' \tHeLLo!how \n Are yoU? 
' lowercase__ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on lowercase__ = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) def UpperCAmelCase ( self : Optional[int]) -> List[Any]: """simple docstring""" lowercase__ = self.get_tokenizer() lowercase__ = self.get_rust_tokenizer() lowercase__ = 'I was born in 92000, and this is falsé.' lowercase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) lowercase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase)) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = self.get_rust_tokenizer() lowercase__ = tokenizer.encode(lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) def UpperCAmelCase ( self : Tuple) -> List[str]: """simple docstring""" lowercase__ = 'This is a test' lowercase__ = [13, 1, 43_98, 25, 21, 12_89] lowercase__ = ['▁', 'T', 'his', '▁is', '▁a', '▁test'] lowercase__ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] lowercase__ = DebertaVaTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase) lowercase__ = DebertaVaTokenizerFast(lowerCAmelCase , keep_accents=lowerCAmelCase) lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.tokenize(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) # fmt: off lowercase__ = 'I was born in 92000, and this is falsé.' 
lowercase__ = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] lowercase__ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] lowercase__ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on lowercase__ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.tokenize(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = tokenizer.convert_ids_to_tokens(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = rust_tokenizer.tokenize(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) lowercase__ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase) self.assertListEqual(lowerCAmelCase , lowerCAmelCase) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" lowercase__ = DebertaVaTokenizer(lowerCAmelCase) lowercase__ = tokenizer.encode('sequence builders') lowercase__ = tokenizer.encode('multi-sequence build') lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase) lowercase__ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , lowerCAmelCase) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , lowerCAmelCase , ) @slow def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" lowercase__ = {'input_ids': [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase , model_name='microsoft/deberta-v2-xlarge' , revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' , )
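# Illustrative sketch of the slow/fast parity pattern the tests above repeat:
# build both tokenizers from the same SentencePiece model and compare outputs.
# SAMPLE_VOCAB is a hypothetical alias for the fixtures/spiece.model path bound
# at the top of this file.
slow_tok = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
fast_tok = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
sentence = "I was born in 92000, and this is falsé."
assert slow_tok.encode(sentence, add_special_tokens=False) == fast_tok.encode(
    sentence, add_special_tokens=False
)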
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int) -> None:
        """k is the empirically chosen sensitivity factor (0.04 or 0.06);
        window_size is the side length of the neighbourhood examined."""
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                # use the configured k rather than a hard-coded 0.04
                r = det - self.k * (trace**2)
                # threshold can be tuned for the input image
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
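# Side note (illustrative, not part of the file above): the per-window response
# computed in detect() is R = det(M) - k * trace(M)**2 for the structure tensor
# M = [[wxx, wxy], [wxy, wyy]]; large positive R marks a corner, strongly
# negative R an edge, and near-zero R a flat patch. Values below are arbitrary.
import numpy as np

wxx, wyy, wxy, k = 9.0, 7.0, 1.0, 0.04
m = np.array([[wxx, wxy], [wxy, wyy]])
r = np.linalg.det(m) - k * np.trace(m) ** 2
assert abs(r - ((wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2)) < 1e-9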
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCAmelCase__( unittest.TestCase ): '''simple docstring''' def __init__( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Tuple=7 , lowerCAmelCase : Optional[Any]=3 , lowerCAmelCase : List[str]=30 , lowerCAmelCase : List[str]=4_00 , lowerCAmelCase : List[str]=True , lowerCAmelCase : Any=None , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Any=[0.5, 0.5, 0.5] , lowerCAmelCase : int=[0.5, 0.5, 0.5] , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[Any]=1 / 2_55 , lowerCAmelCase : Optional[int]=True , ) -> Optional[Any]: """simple docstring""" lowercase__ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33} lowercase__ = parent lowercase__ = batch_size lowercase__ = num_channels lowercase__ = min_resolution lowercase__ = max_resolution lowercase__ = do_resize lowercase__ = size lowercase__ = do_normalize lowercase__ = image_mean lowercase__ = image_std lowercase__ = do_rescale lowercase__ = rescale_factor lowercase__ = do_pad def UpperCAmelCase ( self : str) -> Any: """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any]=False) -> Optional[Any]: """simple docstring""" if not batched: lowercase__ = image_inputs[0] if isinstance(lowerCAmelCase , Image.Image): lowercase__, lowercase__ = image.size else: lowercase__, lowercase__ = image.shape[1], image.shape[2] if w < h: lowercase__ = int(self.size['shortest_edge'] * h / w) lowercase__ = self.size['shortest_edge'] elif w > h: lowercase__ = self.size['shortest_edge'] lowercase__ = int(self.size['shortest_edge'] * w / h) else: lowercase__ = self.size['shortest_edge'] lowercase__ = self.size['shortest_edge'] else: lowercase__ = [] for image in image_inputs: lowercase__, lowercase__ = self.get_expected_values([image]) expected_values.append((expected_height, expected_width)) lowercase__ = max(lowerCAmelCase , key=lambda lowerCAmelCase: item[0])[0] lowercase__ = max(lowerCAmelCase , key=lambda lowerCAmelCase: item[1])[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ): '''simple docstring''' A : List[Any] = DeformableDetrImageProcessor if is_vision_available() else None def UpperCAmelCase ( self : Tuple) -> str: """simple docstring""" lowercase__ = DeformableDetrImageProcessingTester(self) @property def UpperCAmelCase ( self : str) -> int: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCAmelCase ( self : str) -> Optional[int]: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(lowerCAmelCase , 'image_mean')) self.assertTrue(hasattr(lowerCAmelCase , 'image_std')) self.assertTrue(hasattr(lowerCAmelCase , 'do_normalize')) 
self.assertTrue(hasattr(lowerCAmelCase , 'do_resize')) self.assertTrue(hasattr(lowerCAmelCase , 'do_rescale')) self.assertTrue(hasattr(lowerCAmelCase , 'do_pad')) self.assertTrue(hasattr(lowerCAmelCase , 'size')) def UpperCAmelCase ( self : List[Any]) -> Optional[int]: """simple docstring""" lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33}) self.assertEqual(image_processor.do_pad , lowerCAmelCase) lowercase__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84}) self.assertEqual(image_processor.do_pad , lowerCAmelCase) def UpperCAmelCase ( self : List[Any]) -> Any: """simple docstring""" pass def UpperCAmelCase ( self : Any) -> Any: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , Image.Image) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values lowercase__, lowercase__ = self.image_processor_tester.get_expected_values(lowerCAmelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__, lowercase__ = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase) lowercase__ = image_processing(lowerCAmelCase , return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase ( self : str) -> int: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , np.ndarray) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values lowercase__, lowercase__ = self.image_processor_tester.get_expected_values(lowerCAmelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ = image_processing(lowerCAmelCase , return_tensors='pt').pixel_values lowercase__, lowercase__ = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCAmelCase ( self : Any) -> Optional[int]: """simple docstring""" lowercase__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase) for image in image_inputs: self.assertIsInstance(lowerCAmelCase , torch.Tensor) # Test not batched input lowercase__ = image_processing(image_inputs[0] , return_tensors='pt').pixel_values lowercase__, lowercase__ = 
self.image_processor_tester.get_expected_values(lowerCAmelCase) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ = image_processing(lowerCAmelCase , return_tensors='pt').pixel_values lowercase__, lowercase__ = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r') as f: lowercase__ = json.loads(f.read()) lowercase__ = {'image_id': 3_97_69, 'annotations': target} # encode them lowercase__ = DeformableDetrImageProcessor() lowercase__ = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , return_tensors='pt') # verify pixel values lowercase__ = torch.Size([1, 3, 8_00, 10_66]) self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase) lowercase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase , atol=1E-4)) # verify area lowercase__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase)) # verify boxes lowercase__ = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase) lowercase__ = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase , atol=1E-3)) # verify image_id lowercase__ = torch.tensor([3_97_69]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase)) # verify is_crowd lowercase__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase)) # verify class_labels lowercase__ = torch.tensor([75, 75, 63, 65, 17, 17]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase)) # verify orig_size lowercase__ = torch.tensor([4_80, 6_40]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase)) # verify size lowercase__ = torch.tensor([8_00, 10_66]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase)) @slow def UpperCAmelCase ( self : Union[str, Any]) -> Tuple: """simple docstring""" lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png') with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r') as f: lowercase__ = json.loads(f.read()) lowercase__ = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target} lowercase__ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic') # encode them lowercase__ = DeformableDetrImageProcessor(format='coco_panoptic') lowercase__ = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , masks_path=lowerCAmelCase , return_tensors='pt') # verify pixel values lowercase__ = torch.Size([1, 3, 8_00, 10_66]) self.assertEqual(encoding['pixel_values'].shape , lowerCAmelCase) lowercase__ = torch.tensor([0.27_96, 0.31_38, 0.34_81]) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowerCAmelCase , atol=1E-4)) 
# verify area lowercase__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47]) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowerCAmelCase)) # verify boxes lowercase__ = torch.Size([6, 4]) self.assertEqual(encoding['labels'][0]['boxes'].shape , lowerCAmelCase) lowercase__ = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25]) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowerCAmelCase , atol=1E-3)) # verify image_id lowercase__ = torch.tensor([3_97_69]) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowerCAmelCase)) # verify is_crowd lowercase__ = torch.tensor([0, 0, 0, 0, 0, 0]) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowerCAmelCase)) # verify class_labels lowercase__ = torch.tensor([17, 17, 63, 75, 75, 93]) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowerCAmelCase)) # verify masks lowercase__ = 82_28_73 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowerCAmelCase) # verify orig_size lowercase__ = torch.tensor([4_80, 6_40]) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowerCAmelCase)) # verify size lowercase__ = torch.tensor([8_00, 10_66]) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowerCAmelCase))
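# Illustrative sketch of the aspect-preserving resize rule that
# get_expected_values mirrors above: the short side is scaled to
# `shortest_edge` (the 1333 longest-edge clamp is omitted for brevity).
def expected_hw(h: int, w: int, shortest_edge: int = 18) -> tuple[int, int]:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert expected_hw(400, 200) == (36, 18)
assert expected_hw(200, 400) == (18, 36)
assert expected_hw(300, 300) == (18, 18)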
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    """Configuration for Speech2Text encoder-decoder models."""

    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
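# Illustrative usage of the config above (all values arbitrary): the attribute
# map aliases num_attention_heads to encoder_attention_heads, and
# conv_kernel_sizes must contain exactly num_conv_layers entries.
cfg = Speech2TextConfig(encoder_layers=2, decoder_layers=2, d_model=64)
assert cfg.num_attention_heads == cfg.encoder_attention_heads
assert len(cfg.conv_kernel_sizes) == cfg.num_conv_layers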
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCAmelCase__( lowerCamelCase ): '''simple docstring''' def __init__( self : Any , lowerCAmelCase : AutoencoderKL , lowerCAmelCase : CLIPTextModel , lowerCAmelCase : CLIPTokenizer , lowerCAmelCase : UNetaDConditionModel , lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase : StableDiffusionSafetyChecker , lowerCAmelCase : CLIPImageProcessor , ) -> Optional[Any]: """simple docstring""" super().__init__() self.register_modules( vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , ) def UpperCAmelCase ( self : List[str] , lowerCAmelCase : Optional[Union[str, int]] = "auto") -> List[Any]: """simple docstring""" if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowercase__ = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(lowerCAmelCase) def UpperCAmelCase ( self : Optional[Any]) -> Dict: """simple docstring""" self.enable_attention_slicing(lowerCAmelCase) @torch.no_grad() def __call__( self : Dict , lowerCAmelCase : Union[str, List[str]] , lowerCAmelCase : int = 5_12 , lowerCAmelCase : int = 5_12 , lowerCAmelCase : int = 50 , lowerCAmelCase : float = 7.5 , lowerCAmelCase : Optional[Union[str, List[str]]] = None , lowerCAmelCase : Optional[int] = 1 , lowerCAmelCase : float = 0.0 , lowerCAmelCase : Optional[torch.Generator] = None , lowerCAmelCase : Optional[torch.FloatTensor] = None , lowerCAmelCase : Optional[str] = "pil" , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase : int = 1 , lowerCAmelCase : Optional[torch.FloatTensor] = None , **lowerCAmelCase : str , ) -> List[Any]: """simple docstring""" if isinstance(lowerCAmelCase , lowerCAmelCase): lowercase__ = 1 elif isinstance(lowerCAmelCase , lowerCAmelCase): lowercase__ = len(lowerCAmelCase) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase)}''') if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''') if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowerCAmelCase , lowerCAmelCase) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(lowerCAmelCase)}.''') # get prompt text embeddings lowercase__ = self.tokenizer( lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , ) lowercase__ = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowercase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :]) logger.warning( 'The 
following part of your input was truncated because CLIP can only handle sequences up to' f''' {self.tokenizer.model_max_length} tokens: {removed_text}''') lowercase__ = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: lowercase__ = self.text_encoder(text_input_ids.to(self.device))[0] # duplicate text embeddings for each generation per prompt, using mps friendly method lowercase__, lowercase__, lowercase__ = text_embeddings.shape lowercase__ = text_embeddings.repeat(1 , lowerCAmelCase , 1) lowercase__ = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase , -1) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowercase__ = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowercase__ = 42 if negative_prompt is None: lowercase__ = [''] elif type(lowerCAmelCase) is not type(lowerCAmelCase): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase)} !=''' f''' {type(lowerCAmelCase)}.''') elif isinstance(lowerCAmelCase , lowerCAmelCase): lowercase__ = [negative_prompt] elif batch_size != len(lowerCAmelCase): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase)}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' ' the batch size of `prompt`.') else: lowercase__ = negative_prompt lowercase__ = text_input_ids.shape[-1] lowercase__ = self.tokenizer( lowerCAmelCase , padding='max_length' , max_length=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors='pt' , ) lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device))[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowercase__ = uncond_embeddings.shape[1] lowercase__ = uncond_embeddings.repeat(lowerCAmelCase , lowerCAmelCase , 1) lowercase__ = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase , -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowercase__ = torch.cat([uncond_embeddings, text_embeddings]) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) lowercase__ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) lowercase__ = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps lowercase__ = torch.randn( lowerCAmelCase , generator=lowerCAmelCase , device='cpu' , dtype=lowerCAmelCase).to(self.device) lowercase__ = torch.randn(lowerCAmelCase , generator=lowerCAmelCase , device='cpu' , dtype=lowerCAmelCase).to( self.device) else: lowercase__ = torch.randn( lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase) lowercase__ = torch.randn(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''') lowercase__ = latents_reference.to(self.device) lowercase__ = latents.to(self.device) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images lowercase__ = (latents_shape[3] - latents_shape_reference[3]) // 2 lowercase__ = (latents_shape[2] - latents_shape_reference[2]) // 2 lowercase__ = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx lowercase__ = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy lowercase__ = 0 if dx < 0 else dx lowercase__ = 0 if dy < 0 else dy lowercase__ = max(-dx , 0) lowercase__ = max(-dy , 0) # import pdb # pdb.set_trace() lowercase__ = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(lowerCAmelCase) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand lowercase__ = self.scheduler.timesteps.to(self.device) # scale the initial noise by the standard deviation required by the scheduler lowercase__ = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowercase__ = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys()) lowercase__ = {} if accepts_eta: lowercase__ = eta for i, t in enumerate(self.progress_bar(lowerCAmelCase)): # expand the latents if we are doing classifier free guidance lowercase__ = torch.cat([latents] * 2) if do_classifier_free_guidance else latents lowercase__ = self.scheduler.scale_model_input(lowerCAmelCase , lowerCAmelCase) # predict the noise residual lowercase__ = self.unet(lowerCAmelCase , lowerCAmelCase , encoder_hidden_states=lowerCAmelCase).sample # perform guidance if do_classifier_free_guidance: lowercase__, lowercase__ = noise_pred.chunk(2) lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 lowercase__ = self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase) lowercase__ = 1 / 0.1_82_15 * latents lowercase__ = self.vae.decode(lowerCAmelCase).sample lowercase__ = (image / 2 + 0.5).clamp(0 , 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowercase__ = image.cpu().permute(0 , 2 , 3 , 1).float().numpy() if self.safety_checker is not None: lowercase__ = self.feature_extractor(self.numpy_to_pil(lowerCAmelCase) , return_tensors='pt').to( self.device) lowercase__, lowercase__ = self.safety_checker( images=lowerCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)) else: lowercase__ = None if output_type == "pil": lowercase__ = self.numpy_to_pil(lowerCAmelCase) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=lowerCAmelCase , nsfw_content_detected=lowerCAmelCase)
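# Illustrative sketch of the classifier-free guidance step used in the loop
# above: one batched UNet forward over the [uncond, text] halves, then a
# weighted blend of the two predictions. The random tensor stands in for a
# real UNet output.
import torch

noise_pred = torch.randn(2, 4, 8, 8)
noise_uncond, noise_text = noise_pred.chunk(2)
guidance_scale = 7.5
guided = noise_uncond + guidance_scale * (noise_text - noise_uncond)
assert guided.shape == noise_uncond.shape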
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
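# Illustrative effect of the _LazyModule pattern above: the heavy submodules
# are imported only on first attribute access, so importing the package stays
# cheap (assumes transformers is installed).
from transformers.models import reformer

config_cls = reformer.ReformerConfig  # triggers the real import lazily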
from ..utils import is_flax_available, is_torch_available


if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
# Imports import numpy as np class UpperCAmelCase__: '''simple docstring''' def __init__( self : Any , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : List[str]=None) -> Dict: """simple docstring""" self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase) def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Tuple=None , lowerCAmelCase : str=None , lowerCAmelCase : str=None) -> int: """simple docstring""" if red is not None: lowercase__ = red if green is not None: lowercase__ = green if blue is not None: lowercase__ = blue if red_edge is not None: lowercase__ = red_edge if nir is not None: lowercase__ = nir return True def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Union[str, Any]="" , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=None) -> Union[str, Any]: """simple docstring""" self.set_matricies(red=lowerCAmelCase , green=lowerCAmelCase , blue=lowerCAmelCase , red_edge=lowerCAmelCase , nir=lowerCAmelCase) lowercase__ = { 'ARVI2': self.arvaa, 'CCCI': self.ccci, 'CVI': self.cvi, 'GLI': self.gli, 'NDVI': self.ndvi, 'BNDVI': self.bndvi, 'redEdgeNDVI': self.red_edge_ndvi, 'GNDVI': self.gndvi, 'GBNDVI': self.gbndvi, 'GRNDVI': self.grndvi, 'RBNDVI': self.rbndvi, 'PNDVI': self.pndvi, 'ATSAVI': self.atsavi, 'BWDRVI': self.bwdrvi, 'CIgreen': self.ci_green, 'CIrededge': self.ci_rededge, 'CI': self.ci, 'CTVI': self.ctvi, 'GDVI': self.gdvi, 'EVI': self.evi, 'GEMI': self.gemi, 'GOSAVI': self.gosavi, 'GSAVI': self.gsavi, 'Hue': self.hue, 'IVI': self.ivi, 'IPVI': self.ipvi, 'I': self.i, 'RVI': self.rvi, 'MRVI': self.mrvi, 'MSAVI': self.m_savi, 'NormG': self.norm_g, 'NormNIR': self.norm_nir, 'NormR': self.norm_r, 'NGRDI': self.ngrdi, 'RI': self.ri, 'S': self.s, 'IF': self._if, 'DVI': self.dvi, 'TVI': self.tvi, 'NDRE': self.ndre, } try: return funcs[index]() except KeyError: print('Index not in the list!') return False def UpperCAmelCase ( self : Optional[int]) -> List[str]: """simple docstring""" return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red))) def UpperCAmelCase ( self : int) -> Any: """simple docstring""" return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / ( (self.nir - self.red) / (self.nir + self.red) ) def UpperCAmelCase ( self : str) -> Optional[int]: """simple docstring""" return self.nir * (self.red / (self.green**2)) def UpperCAmelCase ( self : List[str]) -> Optional[int]: """simple docstring""" return (2 * self.green - self.red - self.blue) / ( 2 * self.green + self.red + self.blue ) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return (self.nir - self.red) / (self.nir + self.red) def UpperCAmelCase ( self : Union[str, Any]) -> str: """simple docstring""" return (self.nir - self.blue) / (self.nir + self.blue) def UpperCAmelCase ( self : List[Any]) -> Optional[int]: """simple docstring""" return (self.redEdge - self.red) / (self.redEdge + self.red) def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green) def UpperCAmelCase ( self : Dict) -> int: """simple docstring""" return (self.nir - (self.green + self.blue)) / ( self.nir + (self.green + self.blue) ) def UpperCAmelCase ( self : Union[str, 
Any]) -> Optional[int]: """simple docstring""" return (self.nir - (self.green + self.red)) / ( self.nir + (self.green + self.red) ) def UpperCAmelCase ( self : Optional[Any]) -> Dict: """simple docstring""" return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red)) def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" return (self.nir - (self.green + self.red + self.blue)) / ( self.nir + (self.green + self.red + self.blue) ) def UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : List[Any]=0.08 , lowerCAmelCase : Optional[int]=1.22 , lowerCAmelCase : int=0.03) -> List[Any]: """simple docstring""" return a * ( (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2)) ) def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue) def UpperCAmelCase ( self : int) -> Tuple: """simple docstring""" return (self.nir / self.green) - 1 def UpperCAmelCase ( self : Any) -> str: """simple docstring""" return (self.nir / self.redEdge) - 1 def UpperCAmelCase ( self : Any) -> List[str]: """simple docstring""" return (self.red - self.blue) / self.red def UpperCAmelCase ( self : Any) -> Optional[int]: """simple docstring""" lowercase__ = self.ndvi() return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2)) def UpperCAmelCase ( self : List[Any]) -> str: """simple docstring""" return self.nir - self.green def UpperCAmelCase ( self : Tuple) -> List[Any]: """simple docstring""" return 2.5 * ( (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1) ) def UpperCAmelCase ( self : Any) -> Union[str, Any]: """simple docstring""" lowercase__ = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / ( self.nir + self.red + 0.5 ) return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red) def UpperCAmelCase ( self : int , lowerCAmelCase : int=0.16) -> Dict: """simple docstring""" return (self.nir - self.green) / (self.nir + self.green + y) def UpperCAmelCase ( self : str , lowerCAmelCase : Optional[int]=0.5) -> Union[str, Any]: """simple docstring""" return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n) def UpperCAmelCase ( self : str) -> int: """simple docstring""" return np.arctan( ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)) def UpperCAmelCase ( self : Union[str, Any] , lowerCAmelCase : int=None , lowerCAmelCase : List[str]=None) -> Tuple: """simple docstring""" return (self.nir - b) / (a * self.red) def UpperCAmelCase ( self : int) -> Dict: """simple docstring""" return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1) def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return (self.red + self.green + self.blue) / 30.5 def UpperCAmelCase ( self : int) -> str: """simple docstring""" return self.nir / self.red def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return (self.rvi() - 1) / (self.rvi() + 1) def UpperCAmelCase ( self : Optional[int]) -> Optional[int]: """simple docstring""" return ( (2 * self.nir + 1) - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2) ) / 2 def UpperCAmelCase ( self : Tuple) -> Any: """simple docstring""" return self.green / (self.nir + self.red + self.green) def UpperCAmelCase ( self : Any) -> Optional[Any]: """simple docstring""" return self.nir / (self.nir + self.red + self.green) def UpperCAmelCase ( self : List[Any]) -> Dict: """simple docstring""" 
return self.red / (self.nir + self.red + self.green) def UpperCAmelCase ( self : Optional[Any]) -> Any: """simple docstring""" return (self.green - self.red) / (self.green + self.red) def UpperCAmelCase ( self : Dict) -> Tuple: """simple docstring""" return (self.red - self.green) / (self.red + self.green) def UpperCAmelCase ( self : str) -> int: """simple docstring""" lowercase__ = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)]) lowercase__ = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)]) return (max_value - min_value) / max_value def UpperCAmelCase ( self : Optional[int]) -> Tuple: """simple docstring""" return (2 * self.red - self.green - self.blue) / (self.green - self.blue) def UpperCAmelCase ( self : int) -> Optional[Any]: """simple docstring""" return self.nir / self.red def UpperCAmelCase ( self : Dict) -> Dict: """simple docstring""" return (self.ndvi() + 0.5) ** (1 / 2) def UpperCAmelCase ( self : str) -> List[Any]: """simple docstring""" return (self.nir - self.redEdge) / (self.nir + self.redEdge)
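# Illustrative check of the formula the ndvi method above implements,
# NDVI = (NIR - RED) / (NIR + RED), elementwise over the band rasters;
# the reflectance values are arbitrary.
import numpy as np

nir = np.array([[0.8, 0.6], [0.7, 0.9]])
red = np.array([[0.2, 0.3], [0.1, 0.4]])
ndvi = (nir - red) / (nir + red)
assert np.isclose(ndvi[0, 0], 0.6)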