Dataset schema (five columns, with types and value ranges as shown in the preview):

    column                    type    range
    code                      string  lengths 87 to 55.2k
    code_codestyle            int64   0 to 349
    style_context             string  lengths 135 to 49.1k
    style_context_codestyle   int64   0 to 349
    label                     int64   0 to 1
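Each record below is shown in column order: the code field, then its code_codestyle id, then the style_context field, its style_context_codestyle id, and the binary label. As a minimal sketch, loading and inspecting a dataset with this schema might look as follows, assuming it is hosted on the Hugging Face Hub; the dataset id "user/code-style-pairs" is a placeholder, not the real name:

    from datasets import load_dataset

    # Hypothetical hub id; substitute the dataset's actual name.
    ds = load_dataset("user/code-style-pairs", split="train")
    row = ds[0]
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:200])  # first 200 characters of the code field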
"""simple docstring""" def snake_case ( A__ ): return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] ) def snake_case ( A__ ): # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(A__ ) % 2) != 0: raise ValueError( "Base16 encoded data is invalid:\nData does not have an even number of hex digits." ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(A__ ) <= set("0123456789ABCDEF" ): raise ValueError( "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] ,16 ) for i in range(0 ,len(A__ ) ,2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 268
"""simple docstring""" import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
style_context_codestyle: 268
label: 1
"""simple docstring""" from __future__ import annotations def snake_case ( A__ ): UpperCAmelCase_ : Dict = len(A__ ) # We need to create solution object to save path. UpperCAmelCase_ : Dict = [[0 for _ in range(A__ )] for _ in range(A__ )] UpperCAmelCase_ : Optional[int] = run_maze(A__ ,0 ,0 ,A__ ) if solved: print("\n".join(str(A__ ) for row in solutions ) ) else: print("No solution exists!" ) return solved def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : Any = len(A__ ) # Final check point. if i == j == (size - 1): UpperCAmelCase_ : Optional[int] = 1 return True UpperCAmelCase_ : Optional[int] = (not i < 0) and (not j < 0) # Check lower bounds UpperCAmelCase_ : List[Any] = (i < size) and (j < size) # Check upper bounds if lower_flag and upper_flag: # check for already visited and block points. UpperCAmelCase_ : int = (not solutions[i][j]) and (not maze[i][j]) if block_flag: # check visited UpperCAmelCase_ : List[str] = 1 # check for directions if ( run_maze(A__ ,i + 1 ,A__ ,A__ ) or run_maze(A__ ,A__ ,j + 1 ,A__ ) or run_maze(A__ ,i - 1 ,A__ ,A__ ) or run_maze(A__ ,A__ ,j - 1 ,A__ ) ): return True UpperCAmelCase_ : Optional[Any] = 0 return False return False if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 268
"""simple docstring""" from __future__ import annotations class UpperCamelCase_ : def __init__( self : Any , lowerCAmelCase_ : int ) -> None: UpperCAmelCase_ : Any = data UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def snake_case ( A__ ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def snake_case ( A__ ): return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0 def snake_case ( A__ ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def snake_case ( ): # Main function for testing. UpperCAmelCase_ : List[str] = Node(1 ) UpperCAmelCase_ : Any = Node(2 ) UpperCAmelCase_ : Optional[Any] = Node(3 ) UpperCAmelCase_ : Union[str, Any] = Node(4 ) UpperCAmelCase_ : int = Node(5 ) UpperCAmelCase_ : Optional[int] = Node(6 ) UpperCAmelCase_ : Any = Node(7 ) UpperCAmelCase_ : List[str] = Node(8 ) UpperCAmelCase_ : List[Any] = Node(9 ) print(is_full_binary_tree(A__ ) ) print(depth_of_tree(A__ ) ) print("Tree is: " ) display(A__ ) if __name__ == "__main__": main()
style_context_codestyle: 268
label: 1
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(__A ) class UpperCamelCase_ (__A ): def __init__( self : List[Any] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Dict ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : str=None ) -> str: UpperCAmelCase_ : Any = {} UpperCAmelCase_ : List[Any] = {} if prompt is not None: UpperCAmelCase_ : List[Any] = prompt if generate_kwargs is not None: UpperCAmelCase_ : str = generate_kwargs if max_new_tokens is not None: if "generate_kwargs" not in forward_kwargs: UpperCAmelCase_ : List[Any] = {} if "max_new_tokens" in forward_kwargs["generate_kwargs"]: raise ValueError( "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter," " please use only one" ) UpperCAmelCase_ : Union[str, Any] = max_new_tokens return preprocess_params, forward_kwargs, {} def __call__( self : Tuple , lowerCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase_ : int ) -> List[Any]: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=None ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = load_image(lowerCAmelCase_ ) if prompt is not None: if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise ValueError( f"""Received an invalid text input, got - {type(lowerCAmelCase_ )} - but expected a single string. """ "Note also that one single text can be provided for conditional image to text generation." 
) UpperCAmelCase_ : List[str] = self.model.config.model_type if model_type == "git": UpperCAmelCase_ : Optional[int] = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) UpperCAmelCase_ : Any = self.tokenizer(text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids UpperCAmelCase_ : Optional[int] = [self.tokenizer.cls_token_id] + input_ids UpperCAmelCase_ : Dict = torch.tensor(lowerCAmelCase_ ).unsqueeze(0 ) model_inputs.update({"input_ids": input_ids} ) elif model_type == "pix2struct": UpperCAmelCase_ : Union[str, Any] = self.image_processor(images=lowerCAmelCase_ , header_text=lowerCAmelCase_ , return_tensors=self.framework ) elif model_type != "vision-encoder-decoder": # vision-encoder-decoder does not support conditional generation UpperCAmelCase_ : Optional[int] = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) UpperCAmelCase_ : str = self.tokenizer(lowerCAmelCase_ , return_tensors=self.framework ) model_inputs.update(lowerCAmelCase_ ) else: raise ValueError(f"""Model type {model_type} does not support conditional text generation""" ) else: UpperCAmelCase_ : Dict = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) if self.model.config.model_type == "git" and prompt is None: UpperCAmelCase_ : Optional[Any] = None return model_inputs def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str]=None ) -> Dict: # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first. if ( "input_ids" in model_inputs and isinstance(model_inputs["input_ids"] , lowerCAmelCase_ ) and all(x is None for x in model_inputs["input_ids"] ) ): UpperCAmelCase_ : str = None if generate_kwargs is None: UpperCAmelCase_ : Any = {} # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py` # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name` # in the `_prepare_model_inputs` method. UpperCAmelCase_ : Optional[Any] = model_inputs.pop(self.model.main_input_name ) UpperCAmelCase_ : List[Any] = self.model.generate(lowerCAmelCase_ , **lowerCAmelCase_ , **lowerCAmelCase_ ) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] ) -> int: UpperCAmelCase_ : List[Any] = [] for output_ids in model_outputs: UpperCAmelCase_ : int = { "generated_text": self.tokenizer.decode( lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ , ) } records.append(lowerCAmelCase_ ) return records
code_codestyle: 268
"""simple docstring""" def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ): if index == number_of_items: return 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 ) if weights[index] <= max_weight: UpperCAmelCase_ : Union[str, Any] = values[index] + knapsack( A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 ) return max(A__ ,A__ ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 268
label: 1
"""simple docstring""" import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : def __init__( self : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int = 13 , lowerCAmelCase_ : int = 64 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : int = 128 , lowerCAmelCase_ : str=[16, 32, 64, 128] , lowerCAmelCase_ : int = 7 , lowerCAmelCase_ : int = 4 , lowerCAmelCase_ : int = 37 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 10 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 128 , lowerCAmelCase_ : List[int] = [2, 2, 2, 2] , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , ) -> Tuple: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Optional[Any] = batch_size UpperCAmelCase_ : Dict = image_size UpperCAmelCase_ : Union[str, Any] = patch_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : Optional[int] = use_labels UpperCAmelCase_ : Optional[Any] = hidden_size UpperCAmelCase_ : Optional[Any] = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : Optional[int] = intermediate_size UpperCAmelCase_ : Any = hidden_act UpperCAmelCase_ : List[Any] = hidden_dropout_prob UpperCAmelCase_ : Optional[Any] = attention_probs_dropout_prob UpperCAmelCase_ : int = type_sequence_label_size UpperCAmelCase_ : Optional[int] = initializer_range UpperCAmelCase_ : Tuple = encoder_stride UpperCAmelCase_ : List[Any] = num_attention_outputs UpperCAmelCase_ : int = embed_dim UpperCAmelCase_ : Dict = embed_dim + 1 UpperCAmelCase_ : List[Any] = resolution UpperCAmelCase_ : List[str] = depths UpperCAmelCase_ : Optional[Any] = hidden_sizes UpperCAmelCase_ : Optional[Any] = dim UpperCAmelCase_ : Any = mlp_expansion_ratio def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[Any] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : str = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str ) -> Optional[Any]: UpperCAmelCase_ : List[str] = TFEfficientFormerModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any ) -> Any: UpperCAmelCase_ : int = self.type_sequence_label_size UpperCAmelCase_ : Optional[int] = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : List[Any] = 1 UpperCAmelCase_ : List[str] = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]: UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = config_and_inputs UpperCAmelCase_ : str = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) __magic_name__ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: UpperCAmelCase_ : Tuple = TFEfficientFormerModelTester(self ) UpperCAmelCase_ : Dict = ConfigTester( self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="EfficientFormer does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: pass @unittest.skip(reason="EfficientFormer does not support input and output embeddings" ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: pass def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : 
Optional[int] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> int: def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ): UpperCAmelCase_ : List[Any] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : List[str] = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) if hasattr(self.model_tester , "encoder_seq_length" ): UpperCAmelCase_ : Tuple = self.model_tester.encoder_seq_length if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1: UpperCAmelCase_ : int = seq_length * self.model_tester.chunk_length else: UpperCAmelCase_ : List[str] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: UpperCAmelCase_ : List[str] = outputs.decoder_hidden_states self.asseretIsInstance(lowerCAmelCase_ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) UpperCAmelCase_ : Dict = getattr(self.model_tester , "seq_length" , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase_ ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : str = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any]=False ) -> Any: UpperCAmelCase_ : Any = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def 
_SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Dict = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = True UpperCAmelCase_ : Optional[Any] = getattr(self.model_tester , "seq_length" , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = getattr(self.model_tester , "key_length" , lowerCAmelCase_ ) UpperCAmelCase_ : Dict = getattr(self.model_tester , "chunk_length" , lowerCAmelCase_ ) if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ): UpperCAmelCase_ : Tuple = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Optional[int] = False UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Any = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) UpperCAmelCase_ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase_ : Any = True UpperCAmelCase_ : Tuple = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) UpperCAmelCase_ : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model UpperCAmelCase_ : List[Any] = model_class(lowerCAmelCase_ ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes UpperCAmelCase_ : str = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ ) for key, val in model.input_signature.items() if key in model.dummy_inputs } UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ ) self.assertTrue(outputs_dict is not None ) def snake_case ( ): UpperCAmelCase_ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: return ( 
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: UpperCAmelCase_ : Optional[int] = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" ) UpperCAmelCase_ : Optional[int] = self.default_image_processor UpperCAmelCase_ : Optional[int] = prepare_img() UpperCAmelCase_ : Optional[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" ) # forward pass UpperCAmelCase_ : Optional[Any] = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : Any = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: UpperCAmelCase_ : Optional[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( "snap-research/efficientformer-l1-300" ) UpperCAmelCase_ : int = self.default_image_processor UpperCAmelCase_ : List[str] = prepare_img() UpperCAmelCase_ : List[str] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" ) # forward pass UpperCAmelCase_ : Union[str, Any] = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : List[Any] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
code_codestyle: 268
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ (__A ): def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = proj_size UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ ) UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size ) UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] ) UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class UpperCamelCase_ (nn.Module ): def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple: super().__init__() UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5 UpperCAmelCase_ : Optional[Any] = config.hidden_size UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) ] ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str: for block in self.blocks: UpperCAmelCase_ : int = block(lowerCAmelCase_ ) return hidden_states
style_context_codestyle: 268
label: 1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_upernet''': ['''UperNetConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''UperNetForSemanticSegmentation''', '''UperNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_upernet import UperNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
code_codestyle: 268
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(__A ) class UpperCamelCase_ (__A ): def __init__( self : int , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int]=None ) -> List[Any]: UpperCAmelCase_ : str = {} if top_k is not None: UpperCAmelCase_ : List[str] = top_k return {}, {}, postprocess_params def __call__( self : str , lowerCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase_ : Any ) -> Tuple: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str ) -> Any: UpperCAmelCase_ : Tuple = load_image(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) return model_inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : Any = self.model(**lowerCAmelCase_ ) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=5 ) -> Any: if top_k > self.model.config.num_labels: UpperCAmelCase_ : int = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ : str = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(lowerCAmelCase_ ) elif self.framework == "tf": UpperCAmelCase_ : str = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase_ : Union[str, Any] = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCAmelCase_ : int = scores.tolist() UpperCAmelCase_ : Optional[Any] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
style_context_codestyle: 268
label: 1
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = BioGptTokenizer __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ : Optional[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase_ : List[Any] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) UpperCAmelCase_ : Dict = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(lowerCAmelCase_ ) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[str] ) -> Tuple: UpperCAmelCase_ : str = "lower newer" UpperCAmelCase_ : Dict = "lower newer" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: UpperCAmelCase_ : List[str] = BioGptTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase_ : Any = "lower" UpperCAmelCase_ : List[str] = ["low", "er</w>"] UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase_ : List[Any] = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) UpperCAmelCase_ : str = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
code_codestyle: 268
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class UpperCamelCase_ (__A ): __magic_name__ = '''detr''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Dict , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=100 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : str="resnet50" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) UpperCAmelCase_ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Dict = backbone_config.get("model_type" ) UpperCAmelCase_ : Dict = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Tuple = config_class.from_dict(lowerCAmelCase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : str = use_timm_backbone UpperCAmelCase_ : Optional[Any] = backbone_config UpperCAmelCase_ : Tuple = num_channels UpperCAmelCase_ : Dict = num_queries UpperCAmelCase_ : str = d_model UpperCAmelCase_ : Any = encoder_ffn_dim UpperCAmelCase_ : Union[str, Any] = encoder_layers UpperCAmelCase_ : Optional[int] = encoder_attention_heads UpperCAmelCase_ : List[str] = decoder_ffn_dim UpperCAmelCase_ : Tuple = decoder_layers UpperCAmelCase_ : Optional[int] = decoder_attention_heads UpperCAmelCase_ : List[Any] = dropout UpperCAmelCase_ : Union[str, Any] = attention_dropout UpperCAmelCase_ : int = activation_dropout UpperCAmelCase_ : List[str] = activation_function UpperCAmelCase_ : Optional[int] = init_std UpperCAmelCase_ : Union[str, Any] = init_xavier_std UpperCAmelCase_ : List[str] = encoder_layerdrop UpperCAmelCase_ : Tuple = decoder_layerdrop UpperCAmelCase_ : str = encoder_layers UpperCAmelCase_ : Any = auxiliary_loss UpperCAmelCase_ : Optional[int] = position_embedding_type UpperCAmelCase_ : List[str] = backbone UpperCAmelCase_ : int = use_pretrained_backbone UpperCAmelCase_ : Any = dilation # Hungarian matcher UpperCAmelCase_ : str = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : List[str] = mask_loss_coefficient UpperCAmelCase_ : Dict = dice_loss_coefficient UpperCAmelCase_ : Any = bbox_loss_coefficient UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient UpperCAmelCase_ : int = eos_coefficient super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.encoder_attention_heads @property def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return self.d_model @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple ) -> List[Any]: return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, any]: UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : Any = self.__class__.model_type return output class UpperCamelCase_ (__A ): __magic_name__ = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float: return 1e-5 @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return 12
style_context_codestyle: 268
label: 1
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCamelCase_ (__A ): def __init__( self : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = dataset UpperCAmelCase_ : int = process UpperCAmelCase_ : int = params def __len__( self : Union[str, Any] ) -> List[str]: return len(self.dataset ) def __getitem__( self : Tuple , lowerCAmelCase_ : List[str] ) -> int: UpperCAmelCase_ : Any = self.dataset[i] UpperCAmelCase_ : int = self.process(lowerCAmelCase_ , **self.params ) return processed class UpperCamelCase_ (__A ): def __init__( self : Tuple , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int]=None ) -> List[Any]: UpperCAmelCase_ : List[Any] = loader UpperCAmelCase_ : List[str] = infer UpperCAmelCase_ : Optional[int] = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : int = loader_batch_size # Internal bookkeeping UpperCAmelCase_ : Union[str, Any] = None UpperCAmelCase_ : Optional[Any] = None def __len__( self : List[Any] ) -> Optional[int]: return len(self.loader ) def __iter__( self : Tuple ) -> List[str]: UpperCAmelCase_ : str = iter(self.loader ) return self def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice UpperCAmelCase_ : Optional[int] = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) UpperCAmelCase_ : Any = {} for k, element in self._loader_batch_data.items(): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # Convert ModelOutput to tuple first UpperCAmelCase_ : Optional[Any] = element.to_tuple() if isinstance(element[0] , torch.Tensor ): UpperCAmelCase_ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase_ : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): UpperCAmelCase_ : Tuple = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): UpperCAmelCase_ : int = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around UpperCAmelCase_ : List[str] = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase_ : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers UpperCAmelCase_ : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
UpperCAmelCase_ : Tuple = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 UpperCAmelCase_ : Dict = self._loader_batch_data.__class__(lowerCAmelCase_ ) self._loader_batch_index += 1 return result def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch UpperCAmelCase_ : List[str] = next(self.iterator ) UpperCAmelCase_ : List[Any] = self.infer(lowerCAmelCase_ , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(lowerCAmelCase_ , torch.Tensor ): UpperCAmelCase_ : Optional[int] = processed else: UpperCAmelCase_ : List[Any] = list(processed.keys() )[0] UpperCAmelCase_ : Dict = processed[key] if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = len(lowerCAmelCase_ ) else: UpperCAmelCase_ : List[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase_ : List[Any] = observed_batch_size # Setting internal index to unwrap the batch UpperCAmelCase_ : Any = processed UpperCAmelCase_ : Dict = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCamelCase_ (__A ): def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=None ) -> Tuple: super().__init__(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def __iter__( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : int = iter(self.loader ) UpperCAmelCase_ : int = None return self def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: if self.subiterator is None: UpperCAmelCase_ : List[Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item UpperCAmelCase_ : List[str] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators UpperCAmelCase_ : Union[str, Any] = self.infer(next(self.iterator ) , **self.params ) UpperCAmelCase_ : Dict = next(self.subiterator ) return processed class UpperCamelCase_ (__A ): def __iter__( self : Union[str, Any] ) -> Tuple: UpperCAmelCase_ : int = iter(self.loader ) return self def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: # Extremely similar to PipelineIterator in its unpacking mechanism # BUT, we have an extra required item which is the presence of `is_last` # That is because everything is flattened by `PipelineChunkIterator` we # need to keep track of how to regroup here in the original `process` # boundaries so that `process` and `postprocess` see the same data. # This iterator accumulates items (possibly while unbatching) until it # its a `is_last` and then just passes it on to the caller. 
UpperCAmelCase_ : Optional[Any] = False UpperCAmelCase_ : List[Any] = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase_ : List[str] = self.loader_batch_item() UpperCAmelCase_ : Any = item.pop("is_last" ) accumulator.append(lowerCAmelCase_ ) if is_last: return accumulator while not is_last: UpperCAmelCase_ : Tuple = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(lowerCAmelCase_ , torch.Tensor ): UpperCAmelCase_ : Optional[Any] = processed else: UpperCAmelCase_ : Optional[Any] = list(processed.keys() )[0] UpperCAmelCase_ : List[Any] = processed[key] if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : str = len(lowerCAmelCase_ ) else: UpperCAmelCase_ : Union[str, Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. UpperCAmelCase_ : Optional[int] = observed_batch_size UpperCAmelCase_ : List[str] = processed UpperCAmelCase_ : int = 0 while self._loader_batch_index < self.loader_batch_size: UpperCAmelCase_ : Optional[int] = self.loader_batch_item() UpperCAmelCase_ : int = item.pop("is_last" ) accumulator.append(lowerCAmelCase_ ) if is_last: return accumulator else: UpperCAmelCase_ : Dict = processed UpperCAmelCase_ : Union[str, Any] = item.pop("is_last" ) accumulator.append(lowerCAmelCase_ ) return accumulator class UpperCamelCase_ (__A ): def __init__( self : Union[str, Any] , lowerCAmelCase_ : Dataset , lowerCAmelCase_ : str ) -> str: UpperCAmelCase_ : str = dataset UpperCAmelCase_ : List[Any] = key def __len__( self : Any ) -> str: return len(self.dataset ) def __getitem__( self : str , lowerCAmelCase_ : Any ) -> Union[str, Any]: return self.dataset[i][self.key] class UpperCamelCase_ (__A ): def __init__( self : str , lowerCAmelCase_ : Dataset , lowerCAmelCase_ : str , lowerCAmelCase_ : str ) -> List[str]: UpperCAmelCase_ : List[Any] = dataset UpperCAmelCase_ : Tuple = keya UpperCAmelCase_ : Union[str, Any] = keya def __len__( self : Tuple ) -> int: return len(self.dataset ) def __getitem__( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> int: return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
code_codestyle: 268
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = BertTokenizer __magic_name__ = BertTokenizerFast __magic_name__ = True __magic_name__ = True __magic_name__ = filter_non_english def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running" UpperCAmelCase_ : Any = "unwanted, running" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: if not self.test_rust_tokenizer: return UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = 
rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: UpperCAmelCase_ : Optional[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = BasicTokenizer() UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't." 
UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] UpperCAmelCase_ : Tuple = {} for i, token in enumerate(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = i UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , ) UpperCAmelCase_ : Optional[int] = 
tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False UpperCAmelCase_ : List[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = ["的", "人", "有"] UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". UpperCAmelCase_ : Tuple = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
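# --- Editor's illustrative sketch (hedged; not part of the original test file) ---
# The WordpieceTokenizer assertions above rely on greedy longest-match-first
# splitting. A self-contained reproduction with a toy vocabulary (`toy_vocab`
# is an illustrative assumption, not a real checkpoint vocabulary):
from transformers.models.bert.tokenization_bert import WordpieceTokenizer

toy_vocab = {"un": 0, "##want": 1, "##ed": 2, "runn": 3, "##ing": 4, "[UNK]": 5}
toy_wp = WordpieceTokenizer(vocab=toy_vocab, unk_token="[UNK]")
# "unwanted" greedily matches "un", then continuation pieces "##want"/"##ed";
# "unwantedX" fails mid-word and collapses the whole word to the unknown token.
assert toy_wp.tokenize("unwanted running") == ["un", "##want", "##ed", "runn", "##ing"]
assert toy_wp.tokenize("unwantedX running") == ["[UNK]", "runn", "##ing"]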
268
1
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .tokenization_wavaveca import WavaVecaCTCTokenizer class UpperCamelCase_ (__A ): __magic_name__ = '''Wav2Vec2FeatureExtractor''' __magic_name__ = '''AutoTokenizer''' def __init__( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> str: super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self.feature_extractor UpperCAmelCase_ : int = False @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] , lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Union[str, Any] ) -> Tuple: try: return super().from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) except OSError: warnings.warn( f"""Loading a tokenizer inside {cls.__name__} from a config that does not""" " include a `tokenizer_class` attribute is deprecated and will be " "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`" " attribute to either your `config.json` or `tokenizer_config.json` " "file to suppress this warning: " , lowerCAmelCase_ , ) UpperCAmelCase_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : str = WavaVecaCTCTokenizer.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) return cls(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ ) def __call__( self : Optional[int] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : List[Any] ) -> List[Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_ ) if "raw_speech" in kwargs: warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." ) UpperCAmelCase_ : Union[str, Any] = kwargs.pop("raw_speech" ) else: UpperCAmelCase_ : List[str] = kwargs.pop("audio" , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = kwargs.pop("sampling_rate" , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = kwargs.pop("text" , lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: UpperCAmelCase_ : Optional[int] = args[0] UpperCAmelCase_ : Optional[Any] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." 
) if audio is not None: UpperCAmelCase_ : Dict = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is not None: UpperCAmelCase_ : Optional[int] = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_ ) if text is None: return inputs elif audio is None: return encodings else: UpperCAmelCase_ : Tuple = encodings["input_ids"] return inputs def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict: # For backward compatibility if self._in_target_context_manager: return self.current_processor.pad(*lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = kwargs.pop("input_features" , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = kwargs.pop("labels" , lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: UpperCAmelCase_ : Any = args[0] UpperCAmelCase_ : List[str] = args[1:] if input_features is not None: UpperCAmelCase_ : List[Any] = self.feature_extractor.pad(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ ) if labels is not None: UpperCAmelCase_ : Any = self.tokenizer.pad(lowerCAmelCase_ , **lowerCAmelCase_ ) if labels is None: return input_features elif input_features is None: return labels else: UpperCAmelCase_ : List[str] = labels["input_ids"] return input_features def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Any ) -> int: return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : Dict ) -> int: return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) @contextmanager def _SCREAMING_SNAKE_CASE ( self : int ) -> int: warnings.warn( "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your " "labels by using the argument `text` of the regular `__call__` method (either in the same call as " "your audio inputs, or in a separate call." ) UpperCAmelCase_ : str = True UpperCAmelCase_ : Optional[int] = self.tokenizer yield UpperCAmelCase_ : str = self.feature_extractor UpperCAmelCase_ : str = False
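# --- Editor's illustrative sketch (hedged; needs Hub access for the checkpoint) ---
# Minimal end-to-end call of the dual audio/text path implemented in __call__
# above. The checkpoint is the public CTC model and the waveform is synthetic
# noise, used only to show what comes back.
import numpy as np
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
speech = np.random.randn(16_000).astype(np.float32)  # ~1 second at 16 kHz
inputs = processor(audio=speech, sampling_rate=16_000, text="HELLO WORLD", return_tensors="np")
# `labels` carries the tokenized transcript, as wired up in __call__ above.
print(inputs["input_values"].shape, inputs["labels"])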
268
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''t5''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int: UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[Any] = d_model UpperCAmelCase_ : str = d_kv UpperCAmelCase_ : Any = d_ff UpperCAmelCase_ : int = num_layers UpperCAmelCase_ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : Optional[Any] = num_heads UpperCAmelCase_ : Any = relative_attention_num_buckets UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance UpperCAmelCase_ : Optional[Any] = dropout_rate UpperCAmelCase_ : Tuple = layer_norm_epsilon UpperCAmelCase_ : int = initializer_factor UpperCAmelCase_ : int = feed_forward_proj UpperCAmelCase_ : str = use_cache UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" ) UpperCAmelCase_ : List[Any] = act_info[-1] UpperCAmelCase_ : Optional[int] = act_info[0] == "gated" if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : int = "gelu_new" super().__init__( pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , ) class UpperCamelCase_ (__A ): @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : Any = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence" UpperCAmelCase_ : Union[str, Any] = {0: "batch"} UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" ) return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: return 13
268
1
"""simple docstring""" import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint lowerCamelCase_ = { '''169M''': 12, '''430M''': 24, '''1B5''': 24, '''3B''': 32, '''7B''': 32, '''14B''': 40, } lowerCamelCase_ = { '''169M''': 768, '''430M''': 1024, '''1B5''': 2048, '''3B''': 2560, '''7B''': 4096, '''14B''': 5120, } def snake_case ( A__ ): UpperCAmelCase_ : List[Any] = list(state_dict.keys() ) for name in state_dict_keys: UpperCAmelCase_ : Tuple = state_dict.pop(A__ ) # emb -> embedding if name.startswith("emb." ): UpperCAmelCase_ : str = name.replace("emb." ,"embeddings." ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("blocks.0.ln0" ): UpperCAmelCase_ : Tuple = name.replace("blocks.0.ln0" ,"blocks.0.pre_ln" ) # att -> attention UpperCAmelCase_ : List[str] = re.sub(r"blocks\.(\d+)\.att" ,r"blocks.\1.attention" ,A__ ) # ffn -> feed_forward UpperCAmelCase_ : Optional[int] = re.sub(r"blocks\.(\d+)\.ffn" ,r"blocks.\1.feed_forward" ,A__ ) # time_mix_k -> time_mix_key and reshape if name.endswith(".time_mix_k" ): UpperCAmelCase_ : List[Any] = name.replace(".time_mix_k" ,".time_mix_key" ) # time_mix_v -> time_mix_value and reshape if name.endswith(".time_mix_v" ): UpperCAmelCase_ : Any = name.replace(".time_mix_v" ,".time_mix_value" ) # time_mix_r -> time_mix_key and reshape if name.endswith(".time_mix_r" ): UpperCAmelCase_ : List[Any] = name.replace(".time_mix_r" ,".time_mix_receptance" ) if name != "head.weight": UpperCAmelCase_ : Union[str, Any] = "rwkv." + name UpperCAmelCase_ : Optional[int] = weight return state_dict def snake_case ( A__ ,A__ ,A__ ,A__=None ,A__=None ,A__=False ,A__=None ): # 1. If possible, build the tokenizer. if tokenizer_file is None: print("No `--tokenizer_file` provided, we will use the default tokenizer." ) UpperCAmelCase_ : List[str] = 5_02_77 UpperCAmelCase_ : Tuple = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" ) else: UpperCAmelCase_ : Optional[Any] = PreTrainedTokenizerFast(tokenizer_file=A__ ) UpperCAmelCase_ : List[Any] = len(A__ ) tokenizer.save_pretrained(A__ ) # 2. Build the config UpperCAmelCase_ : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: UpperCAmelCase_ : Optional[Any] = candidate break if size is None: raise ValueError("Could not infer the size, please provide it with the `--size` argument." ) if size not in possible_sizes: raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" ) UpperCAmelCase_ : Tuple = RwkvConfig( vocab_size=A__ ,num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] ,hidden_size=HIDEN_SIZE_MAPPING[size] ,) config.save_pretrained(A__ ) # 3. Download model file then convert state_dict UpperCAmelCase_ : Optional[int] = hf_hub_download(A__ ,A__ ) UpperCAmelCase_ : Dict = torch.load(A__ ,map_location="cpu" ) UpperCAmelCase_ : int = convert_state_dict(A__ ) # 4. 
Split in shards and save UpperCAmelCase_ , UpperCAmelCase_ : Any = shard_checkpoint(A__ ) for shard_file, shard in shards.items(): torch.save(A__ ,os.path.join(A__ ,A__ ) ) if index is not None: UpperCAmelCase_ : int = os.path.join(A__ ,A__ ) # Save the index as well with open(A__ ,"w" ,encoding="utf-8" ) as f: UpperCAmelCase_ : int = json.dumps(A__ ,indent=2 ,sort_keys=A__ ) + "\n" f.write(A__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( "Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." ) UpperCAmelCase_ : Any = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: UpperCAmelCase_ : Dict = torch.load(os.path.join(A__ ,A__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} ,os.path.join(A__ ,A__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("Please provide a `model_name` to push the model to the Hub." ) UpperCAmelCase_ : Union[str, Any] = AutoModelForCausalLM.from_pretrained(A__ ) model.push_to_hub(A__ ,max_shard_size="2GB" ) tokenizer.push_to_hub(A__ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.''' ) parser.add_argument( '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.''' ) parser.add_argument( '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.''' ) parser.add_argument( '''--tokenizer_file''', default=None, type=str, help='''Path to the tokenizer file to use (if not provided, only the model is converted).''', ) parser.add_argument( '''--size''', default=None, type=str, help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Push to the Hub the converted model.''', ) parser.add_argument( '''--model_name''', default=None, type=str, help='''Name of the pushed model on the Hub, including the username / organization.''', ) lowerCamelCase_ = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
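# --- Editor's illustrative invocation (hedged; repo id, file name and paths are placeholders) ---
# The converter above is meant to be run as a CLI script, e.g.:
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-169m-hf
#
# `--size` can be omitted whenever a known size tag such as "169M" appears in
# the checkpoint file name, which triggers the inference branch above. The
# final cleanup pass reloads each shard and re-saves it with cloned CPU
# tensors (`v.cpu().clone()`) so a shard no longer shares storage with the
# full state dict and only occupies its own weights on disk.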
268
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class UpperCamelCase_ : # setable values __magic_name__ = None __magic_name__ = None __magic_name__ = None # sigma(t_i) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Optional[Any]: return cls() @dataclass class UpperCamelCase_ (__A ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 class UpperCamelCase_ (__A , __A ): @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: return True @register_to_config def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: return KarrasVeSchedulerState.create() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState: UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy() UpperCAmelCase_ : Dict = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]: if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: UpperCAmelCase_ : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 ) UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape ) UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : str = sample_prev + sigma_prev * 
model_output UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict: raise NotImplementedError()
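# --- Editor's illustrative note (hedged, schematic only) ---
# The two updates above mirror Algorithm 2 of Karras et al. (2022): an Euler
# step followed by a second-order correction that averages the derivative at
# sigma_hat and sigma_prev. The method names below follow the released
# FlaxKarrasVeScheduler API (they are obscured in the listing); a schematic
# sampling loop, assuming `scheduler`, `state`, `model`, `sample` and an RNG
# `key` are already constructed, and ignoring the final-step boundary case:
#
#   for i, t in enumerate(state.timesteps):
#       sigma = state.schedule[i]
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#       model_out = model(sample_hat, sigma_hat)
#       sample, derivative, state = scheduler.step(
#           state, model_out, sigma_hat, state.schedule[i + 1], sample_hat, return_dict=False
#       )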
268
1
"""simple docstring""" import numpy as np from transformers import Pipeline def snake_case ( A__ ): UpperCAmelCase_ : Optional[Any] = np.max(A__ ,axis=-1 ,keepdims=A__ ) UpperCAmelCase_ : List[Any] = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 ,keepdims=A__ ) class UpperCamelCase_ (__A ): def _SCREAMING_SNAKE_CASE ( self : Any , **lowerCAmelCase_ : int ) -> List[str]: UpperCAmelCase_ : Optional[Any] = {} if "second_text" in kwargs: UpperCAmelCase_ : List[Any] = kwargs["second_text"] return preprocess_kwargs, {}, {} def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int]=None ) -> Dict: return self.tokenizer(lowerCAmelCase_ , text_pair=lowerCAmelCase_ , return_tensors=self.framework ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] ) -> Tuple: return self.model(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = model_outputs.logits[0].numpy() UpperCAmelCase_ : Optional[int] = softmax(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = np.argmax(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.model.config.idalabel[best_class] UpperCAmelCase_ : int = probabilities[best_class].item() UpperCAmelCase_ : str = logits.tolist() return {"label": label, "score": score, "logits": logits}
268
"""simple docstring""" def snake_case ( A__ ,A__ ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) UpperCAmelCase_ : Dict = (boundary[1] - boundary[0]) / steps UpperCAmelCase_ : Optional[int] = boundary[0] UpperCAmelCase_ : str = boundary[1] UpperCAmelCase_ : Tuple = make_points(A__ ,A__ ,A__ ) UpperCAmelCase_ : List[str] = 0.0 y += (h / 2.0) * f(A__ ) for i in x_i: # print(i) y += h * f(A__ ) y += (h / 2.0) * f(A__ ) return y def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Union[str, Any] = a + h while x < (b - h): yield x UpperCAmelCase_ : Optional[Any] = x + h def snake_case ( A__ ): # enter your function here UpperCAmelCase_ : Dict = (x - 0) * (x - 0) return y def snake_case ( ): UpperCAmelCase_ : Dict = 0.0 # Lower bound of integration UpperCAmelCase_ : Optional[int] = 1.0 # Upper bound of integration UpperCAmelCase_ : Dict = 10.0 # define number of steps or resolution UpperCAmelCase_ : List[Any] = [a, b] # define boundary of integration UpperCAmelCase_ : Union[str, Any] = method_a(A__ ,A__ ) print(F"""y = {y}""" ) if __name__ == "__main__": main()
268
1
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''vocab.txt'''} lowerCamelCase_ = { '''vocab_file''': { '''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''', }, } lowerCamelCase_ = { '''openbmb/cpm-ant-10b''': 1024, } def snake_case ( A__ ): UpperCAmelCase_ : List[str] = collections.OrderedDict() with open(A__ ,"r" ,encoding="utf-8" ) as reader: UpperCAmelCase_ : Dict = reader.readlines() for index, token in enumerate(A__ ): UpperCAmelCase_ : Optional[Any] = token.rstrip("\n" ) UpperCAmelCase_ : Optional[Any] = index return vocab class UpperCamelCase_ (__A ): def __init__( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any="<unk>" , lowerCAmelCase_ : int=200 ) -> Tuple: UpperCAmelCase_ : str = vocab UpperCAmelCase_ : str = unk_token UpperCAmelCase_ : int = max_input_chars_per_word def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : List[Any] ) -> Dict: UpperCAmelCase_ : Optional[Any] = list(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > self.max_input_chars_per_word: return [self.unk_token] UpperCAmelCase_ : Tuple = 0 UpperCAmelCase_ : str = [] while start < len(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = len(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = None while start < end: UpperCAmelCase_ : Dict = "".join(chars[start:end] ) if substr in self.vocab: UpperCAmelCase_ : Union[str, Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(lowerCAmelCase_ ) UpperCAmelCase_ : int = end return sub_tokens class UpperCamelCase_ (__A ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = False def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any="<d>" , lowerCAmelCase_ : Tuple="</d>" , lowerCAmelCase_ : int="<s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : Optional[int]="<pad>" , lowerCAmelCase_ : Tuple="<unk>" , lowerCAmelCase_ : Optional[int]="</n>" , lowerCAmelCase_ : List[str]="</_>" , lowerCAmelCase_ : Any="left" , **lowerCAmelCase_ : List[Any] , ) -> str: requires_backends(self , ["jieba"] ) super().__init__( bod_token=lowerCAmelCase_ , eod_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , line_token=lowerCAmelCase_ , space_token=lowerCAmelCase_ , padding_side=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Optional[int] = bod_token UpperCAmelCase_ : Optional[int] = eod_token UpperCAmelCase_ : List[Any] = load_vocab(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.encoder[space_token] UpperCAmelCase_ : Any = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCAmelCase_ : Optional[int] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase_ : x[1] ) ) UpperCAmelCase_ : List[Any] = {v: k for k, v in self.encoder.items()} UpperCAmelCase_ : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: return 
self.encoder[self.bod_token] @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: return self.encoder[self.eod_token] @property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: return self.encoder["\n"] @property def _SCREAMING_SNAKE_CASE ( self : str ) -> int: return len(self.encoder ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: return dict(self.encoder , **self.added_tokens_encoder ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = [] for x in jieba.cut(lowerCAmelCase_ , cut_all=lowerCAmelCase_ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCAmelCase_ ) ) return output_tokens def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Optional[int] ) -> str: UpperCAmelCase_ : Optional[Any] = [i for i in token_ids if i >= 0] UpperCAmelCase_ : Dict = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[str] ) -> str: return token in self.encoder def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : List[str] ) -> str: return "".join(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Tuple ) -> Dict: return self.encoder.get(lowerCAmelCase_ , self.encoder.get(self.unk_token ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[Any] ) -> str: return self.decoder.get(lowerCAmelCase_ , self.unk_token ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: if os.path.isdir(lowerCAmelCase_ ): UpperCAmelCase_ : Union[str, Any] = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: UpperCAmelCase_ : str = (filename_prefix + "-" if filename_prefix else "") + save_directory UpperCAmelCase_ : List[str] = 0 if " " in self.encoder: UpperCAmelCase_ : Union[str, Any] = self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: UpperCAmelCase_ : Optional[int] = self.encoder["\n"] del self.encoder["\n"] UpperCAmelCase_ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase_ : x[1] ) ) with open(lowerCAmelCase_ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" " Please check that the vocabulary is not corrupted!" 
) UpperCAmelCase_ : int = token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : List[int] = None ) -> List[int]: if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) if token_ids_a is not None: return [1] + ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) return [1] + ([0] * len(lowerCAmelCase_ ))
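# --- Editor's illustrative sketch (hedged; requires the `jieba` extra and Hub access) ---
# The tokenizer above segments Chinese text with jieba first, then applies the
# greedy longest-match WordPiece pass per segment:
#
#   from transformers import CpmAntTokenizer
#   tok = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   print(tok.tokenize("今天天气真好"))
#   print(tok.decode(tok.encode("今天天气真好")))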
268
"""simple docstring""" from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def snake_case ( A__ ,A__ ,A__ ): if not arr: return None, None, 0 if low == high: return low, high, arr[low] UpperCAmelCase_ : Dict = (low + high) // 2 UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = max_subarray(A__ ,A__ ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = max_subarray(A__ ,mid + 1 ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = max_cross_sum(A__ ,A__ ,A__ ,A__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ , UpperCAmelCase_ : str = float("-inf" ), -1 UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = float("-inf" ), -1 UpperCAmelCase_ : int | float = 0 for i in range(A__ ,low - 1 ,-1 ): summ += arr[i] if summ > left_sum: UpperCAmelCase_ : str = summ UpperCAmelCase_ : Any = i UpperCAmelCase_ : Dict = 0 for i in range(mid + 1 ,high + 1 ): summ += arr[i] if summ > right_sum: UpperCAmelCase_ : List[Any] = summ UpperCAmelCase_ : Optional[Any] = i return max_left, max_right, (left_sum + right_sum) def snake_case ( A__ ): UpperCAmelCase_ : str = [randint(1 ,A__ ) for _ in range(A__ )] UpperCAmelCase_ : str = time.time() max_subarray(A__ ,0 ,input_size - 1 ) UpperCAmelCase_ : int = time.time() return end - start def snake_case ( ): UpperCAmelCase_ : int = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00] UpperCAmelCase_ : List[str] = [time_max_subarray(A__ ) for input_size in input_sizes] print("No of Inputs\t\tTime Taken" ) for input_size, runtime in zip(A__ ,A__ ): print(A__ ,"\t\t" ,A__ ) plt.plot(A__ ,A__ ) plt.xlabel("Number of Inputs" ) plt.ylabel("Time taken in seconds" ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
268
1
"""simple docstring""" from __future__ import annotations def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ,): UpperCAmelCase_ : int = len(A__ ) # If row is equal to the size of the board it means there are a queen in each row in # the current board (possible_board) if row == n: # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . '] boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] ) return # We iterate each column in the row to find all possible results in each row for col in range(A__ ): # We apply that we learned previously. First we check that in the current board # (possible_board) there are not other same value because if there is it means # that there are a collision in vertical. Then we apply the two formulas we # learned before: # # 45º: y - x = b or 45: row - col = b # 135º: y + x = b or row + col = b. # # And we verify if the results of this two formulas not exist in their variables # respectively. (diagonal_right_collisions, diagonal_left_collisions) # # If any or these are True it means there is a collision so we continue to the # next value in the for loop. if ( col in possible_board or row - col in diagonal_right_collisions or row + col in diagonal_left_collisions ): continue # If it is False we call dfs function again and we update the inputs depth_first_search( [*possible_board, col] ,[*diagonal_right_collisions, row - col] ,[*diagonal_left_collisions, row + col] ,A__ ,A__ ,) def snake_case ( A__ ): UpperCAmelCase_ : list[list[str]] = [] depth_first_search([] ,[] ,[] ,A__ ,A__ ) # Print all the boards for board in boards: for column in board: print(A__ ) print("" ) print(len(A__ ) ,"solutions were found." ) if __name__ == "__main__": import doctest doctest.testmod() n_queens_solution(4)
268
"""simple docstring""" from __future__ import annotations import time lowerCamelCase_ = list[tuple[int, int]] lowerCamelCase_ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class UpperCamelCase_ : def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None ) -> Dict: UpperCAmelCase_ : Any = pos_x UpperCAmelCase_ : str = pos_y UpperCAmelCase_ : int = (pos_y, pos_x) UpperCAmelCase_ : int = goal_x UpperCAmelCase_ : Tuple = goal_y UpperCAmelCase_ : Union[str, Any] = parent class UpperCamelCase_ : def __init__( self : List[Any] , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Tuple: UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [self.start] UpperCAmelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Path | None: while self.node_queue: UpperCAmelCase_ : str = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: UpperCAmelCase_ : Optional[Any] = True return self.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_successors(lowerCAmelCase_ ) for node in successors: self.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> list[Node]: UpperCAmelCase_ : List[str] = [] for action in delta: UpperCAmelCase_ : List[Any] = parent.pos_x + action[1] UpperCAmelCase_ : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , lowerCAmelCase_ ) ) return successors def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node | None ) -> Path: UpperCAmelCase_ : Union[str, Any] = node UpperCAmelCase_ : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Tuple = current_node.parent path.reverse() return path class UpperCamelCase_ : def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Path | None: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: UpperCAmelCase_ : int = self.fwd_bfs.node_queue.pop(0 ) UpperCAmelCase_ : Dict = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: UpperCAmelCase_ : str = True return self.retrace_bidirectional_path( lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = current_bwd_node UpperCAmelCase_ : List[str] = current_fwd_node UpperCAmelCase_ : Tuple = { self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase_ ), self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase_ ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: 
bfs.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Path: UpperCAmelCase_ : Optional[Any] = self.fwd_bfs.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.bwd_bfs.retrace_path(lowerCAmelCase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() lowerCamelCase_ = (0, 0) lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCamelCase_ = time.time() lowerCamelCase_ = BreadthFirstSearch(init, goal) lowerCamelCase_ = bfs.search() lowerCamelCase_ = time.time() - start_bfs_time print('''Unidirectional BFS computation time : ''', bfs_time) lowerCamelCase_ = time.time() lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal) lowerCamelCase_ = bd_bfs.search() lowerCamelCase_ = time.time() - start_bd_bfs_time print('''Bidirectional BFS computation time : ''', bd_bfs_time)
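# --- Editor's illustrative note (hedged) ---
# With branching factor b and solution depth d, plain BFS explores O(b^d)
# nodes while the bidirectional variant above explores roughly O(b^(d/2))
# from each side. On this 7x7 grid both searches finish in well under a
# millisecond, so the printed timings mostly measure interpreter overhead
# rather than the asymptotic gap.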
268
1
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class UpperCamelCase_ (unittest.TestCase ): def __init__( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=7 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : List[str]=30 , lowerCAmelCase_ : int=400 , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Any=[0.5, 0.5, 0.5] , lowerCAmelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Optional[Any]=1 / 255 , lowerCAmelCase_ : List[str]=True , ) -> str: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p UpperCAmelCase_ : int = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333} UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : Optional[int] = batch_size UpperCAmelCase_ : List[str] = num_channels UpperCAmelCase_ : Union[str, Any] = min_resolution UpperCAmelCase_ : Tuple = max_resolution UpperCAmelCase_ : Dict = do_resize UpperCAmelCase_ : List[Any] = size UpperCAmelCase_ : Union[str, Any] = do_normalize UpperCAmelCase_ : Any = image_mean UpperCAmelCase_ : int = image_std UpperCAmelCase_ : Tuple = do_rescale UpperCAmelCase_ : Any = rescale_factor UpperCAmelCase_ : Dict = do_pad def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple=False ) -> List[str]: if not batched: UpperCAmelCase_ : Any = image_inputs[0] if isinstance(lowerCAmelCase_ , Image.Image ): UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = image.size else: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = image.shape[1], image.shape[2] if w < h: UpperCAmelCase_ : Any = int(self.size["shortest_edge"] * h / w ) UpperCAmelCase_ : Tuple = self.size["shortest_edge"] elif w > h: UpperCAmelCase_ : List[str] = self.size["shortest_edge"] UpperCAmelCase_ : List[str] = int(self.size["shortest_edge"] * w / h ) else: UpperCAmelCase_ : str = self.size["shortest_edge"] UpperCAmelCase_ : List[str] = self.size["shortest_edge"] else: UpperCAmelCase_ : int = [] for image in image_inputs: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase_ : Dict = max(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : item[0] )[0] UpperCAmelCase_ : Any = max(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = DeformableDetrImageProcessor if is_vision_available() else None def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = DeformableDetrImageProcessingTester(self ) @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: return 
self.image_processor_tester.prepare_image_processor_dict() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: UpperCAmelCase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "image_std" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "do_rescale" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "do_pad" ) ) self.assertTrue(hasattr(lowerCAmelCase_ , "size" ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase_ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: # Initialize image_processing UpperCAmelCase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase_ , Image.Image ) # Test not batched input UpperCAmelCase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.image_processor_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = image_processing(lowerCAmelCase_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: # Initialize image_processing UpperCAmelCase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase_ , np.ndarray ) # Test not batched input UpperCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.image_processor_tester.get_expected_values(lowerCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ : int = image_processing(lowerCAmelCase_ , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ ) self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: # Initialize image_processing UpperCAmelCase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase_ , torch.Tensor ) # Test not batched input UpperCAmelCase_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase_ : str = image_processing(lowerCAmelCase_ , return_tensors="pt" ).pixel_values UpperCAmelCase_ , UpperCAmelCase_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase_ , batched=lowerCAmelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: # prepare image and target UpperCAmelCase_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: UpperCAmelCase_ : List[str] = json.loads(f.read() ) UpperCAmelCase_ : int = {"image_id": 39_769, "annotations": target} # encode them UpperCAmelCase_ : Tuple = DeformableDetrImageProcessor() UpperCAmelCase_ : Optional[int] = image_processing(images=lowerCAmelCase_ , annotations=lowerCAmelCase_ , return_tensors="pt" ) # verify pixel values UpperCAmelCase_ : Any = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase_ ) UpperCAmelCase_ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase_ , atol=1e-4 ) ) # verify area UpperCAmelCase_ : Optional[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase_ ) ) # verify boxes UpperCAmelCase_ : List[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase_ , atol=1e-3 ) ) # verify image_id UpperCAmelCase_ : Dict = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase_ ) ) # verify is_crowd UpperCAmelCase_ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase_ ) ) # verify class_labels UpperCAmelCase_ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase_ ) ) # verify orig_size UpperCAmelCase_ : List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase_ ) ) # verify size 
UpperCAmelCase_ : Union[str, Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase_ ) ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: # prepare image, target and masks_path UpperCAmelCase_ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: UpperCAmelCase_ : Tuple = json.loads(f.read() ) UpperCAmelCase_ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target} UpperCAmelCase_ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them UpperCAmelCase_ : Union[str, Any] = DeformableDetrImageProcessor(format="coco_panoptic" ) UpperCAmelCase_ : str = image_processing(images=lowerCAmelCase_ , annotations=lowerCAmelCase_ , masks_path=lowerCAmelCase_ , return_tensors="pt" ) # verify pixel values UpperCAmelCase_ : List[Any] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase_ , atol=1e-4 ) ) # verify area UpperCAmelCase_ : Union[str, Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase_ ) ) # verify boxes UpperCAmelCase_ : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase_ , atol=1e-3 ) ) # verify image_id UpperCAmelCase_ : int = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase_ ) ) # verify is_crowd UpperCAmelCase_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase_ ) ) # verify class_labels UpperCAmelCase_ : int = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase_ ) ) # verify masks UpperCAmelCase_ : Tuple = 822_873 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase_ ) # verify orig_size UpperCAmelCase_ : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase_ ) ) # verify size UpperCAmelCase_ : str = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase_ ) )
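# --- Editor's illustrative sketch (hedged; checkpoint name assumed from the Hub) ---
# Outside the test harness the processor is normally loaded through the Auto API:
#
#   from PIL import Image
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
#   pixel_values = processor(images=Image.open("cats.png"), return_tensors="pt").pixel_values
#   # (1, 3, H, W): shortest edge resized toward 800, longest edge capped at 1333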
268
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCamelCase_ = None lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } lowerCamelCase_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class UpperCamelCase_ (__A ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = MBartTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Any , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( vocab_file=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Tuple = vocab_file UpperCAmelCase_ : str = False if not self.vocab_file else True UpperCAmelCase_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) UpperCAmelCase_ : Tuple = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase_ : int = src_lang if src_lang is not None else "en_XX" UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase_ : Any = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : List[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : List[str] = [self.sep_token_id] UpperCAmelCase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCAmelCase_ : str = src_lang UpperCAmelCase_ : str = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Dict , ) -> BatchEncoding: UpperCAmelCase_ : List[Any] = src_lang UpperCAmelCase_ : Tuple = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> None: UpperCAmelCase_ : int = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : str = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , 
) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return UpperCAmelCase_ : List[str] = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
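# --- usage sketch (appended; not part of the original file) ---------------------
# The class above follows the multilingual mBART-style fast tokenizers in
# transformers: `src_lang`/`tgt_lang` drive the special tokens wrapped around the
# input, and `_build_translation_inputs` injects the forced BOS id for generation.
# The class and checkpoint names below are assumptions made for this sketch; the
# call pattern itself is the standard transformers API.
from transformers import MBartTokenizerFast

tokenizer = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)

# Encoder input ends with </s> plus the source language code (see
# set_src_lang_special_tokens above); labels get the target-language handling.
batch = tokenizer(
    "UN Chief Says There Is No Military Solution in Syria",
    text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
    return_tensors="pt",
)
print(batch["input_ids"][0][-2:])  # [..., eos_id, en_XX language-code id]
print(batch["labels"][0][:5])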
268
1
"""simple docstring""" import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def snake_case ( A__ ,A__ ,A__ ,A__=None ,A__=None ,A__=None ,A__=None ,A__=None ,): if attention_mask is None: UpperCAmelCase_ : str = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: UpperCAmelCase_ : Dict = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: UpperCAmelCase_ : Optional[Any] = torch.ones(config.encoder_layers ,config.encoder_attention_heads ,device=A__ ) if decoder_head_mask is None: UpperCAmelCase_ : List[Any] = torch.ones(config.decoder_layers ,config.decoder_attention_heads ,device=A__ ) if cross_attn_head_mask is None: UpperCAmelCase_ : Optional[Any] = torch.ones(config.decoder_layers ,config.decoder_attention_heads ,device=A__ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class UpperCamelCase_ : def __init__( self : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any=13 , lowerCAmelCase_ : str=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : Tuple=99 , lowerCAmelCase_ : str=16 , lowerCAmelCase_ : Optional[int]=2 , lowerCAmelCase_ : Any=4 , lowerCAmelCase_ : Tuple=4 , lowerCAmelCase_ : Union[str, Any]="relu" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.1 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : str=20 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Tuple=1 , lowerCAmelCase_ : List[Any]=0 , ) -> Dict: UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : Any = is_training UpperCAmelCase_ : List[str] = use_labels UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : str = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : Any = intermediate_size UpperCAmelCase_ : Any = hidden_act UpperCAmelCase_ : Any = hidden_dropout_prob UpperCAmelCase_ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase_ : List[str] = encoder_layerdrop UpperCAmelCase_ : List[Any] = decoder_layerdrop UpperCAmelCase_ : Dict = max_position_embeddings UpperCAmelCase_ : List[Any] = eos_token_id UpperCAmelCase_ : str = pad_token_id UpperCAmelCase_ : int = bos_token_id def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : List[str] = self.eos_token_id # Eos Token UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is 
because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input UpperCAmelCase_ : Union[str, Any] = input_ids.clamp(self.pad_token_id + 1 ) UpperCAmelCase_ : Optional[int] = decoder_input_ids.clamp(self.pad_token_id + 1 ) UpperCAmelCase_ : Tuple = self.get_config() UpperCAmelCase_ : List[Any] = prepare_mam_aaa_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> int: UpperCAmelCase_ : Optional[Any] = MaMaaaModel(config=lowerCAmelCase_ ).get_decoder().to(lowerCAmelCase_ ).eval() UpperCAmelCase_ : str = inputs_dict["input_ids"] UpperCAmelCase_ : Optional[int] = inputs_dict["attention_mask"] UpperCAmelCase_ : Union[str, Any] = inputs_dict["head_mask"] # first forward pass UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase_ : str = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and UpperCAmelCase_ : Any = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase_ : Tuple = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) UpperCAmelCase_ : List[str] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )["last_hidden_state"] UpperCAmelCase_ : Any = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[ "last_hidden_state" ] # select random slice UpperCAmelCase_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase_ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-2 ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ) -> str: UpperCAmelCase_ : str = 
MaMaaaModel(config=lowerCAmelCase_ ).to(lowerCAmelCase_ ).eval() UpperCAmelCase_ : int = model(**lowerCAmelCase_ ) UpperCAmelCase_ : Dict = outputs.encoder_last_hidden_state UpperCAmelCase_ : Union[str, Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : Union[str, Any] = model.get_encoder() encoder.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = MaMaaaEncoder.from_pretrained(lowerCAmelCase_ ).to(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase_ : Dict = model.get_decoder() decoder.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = MaMaaaDecoder.from_pretrained(lowerCAmelCase_ ).to(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class UpperCamelCase_ (__A , __A , __A , unittest.TestCase ): __magic_name__ = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, ) if is_torch_available() else () ) __magic_name__ = (MaMaaaForConditionalGeneration,) if is_torch_available() else () __magic_name__ = ( { '''conversational''': MaMaaaForConditionalGeneration, '''feature-extraction''': MaMaaaModel, '''summarization''': MaMaaaForConditionalGeneration, '''text2text-generation''': MaMaaaForConditionalGeneration, '''translation''': MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) __magic_name__ = True __magic_name__ = True __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int] ) -> str: if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. 
return True return False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: UpperCAmelCase_ : List[str] = MaMaaaModelTester(self ) UpperCAmelCase_ : Dict = ConfigTester(self , config_class=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: UpperCAmelCase_ : List[str] = model_class(lowerCAmelCase_ ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = model_class.from_pretrained(lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ ) self.assertEqual(info["missing_keys"] , [] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration): UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = copy.deepcopy(self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) if not self.is_encoder_decoder: UpperCAmelCase_ : Optional[int] = inputs["input_ids"] del inputs["input_ids"] else: UpperCAmelCase_ : Dict = inputs["input_ids"] UpperCAmelCase_ : Dict = inputs.get("decoder_input_ids" , lowerCAmelCase_ ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model.get_input_embeddings() if not self.is_encoder_decoder: UpperCAmelCase_ : str = wte(lowerCAmelCase_ ) else: UpperCAmelCase_ : Optional[int] = wte(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = wte(lowerCAmelCase_ ) with torch.no_grad(): model(**lowerCAmelCase_ )[0] def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() UpperCAmelCase_ : List[str] = input_dict["input_ids"] UpperCAmelCase_ : Optional[Any] = input_ids.ne(1 ).to(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = MaMaaaForConditionalGeneration(lowerCAmelCase_ ).eval().to(lowerCAmelCase_ ) if torch_device == "cuda": model.half() model.generate(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) model.generate(num_beams=4 , do_sample=lowerCAmelCase_ , early_stopping=lowerCAmelCase_ , num_return_sequences=3 ) def snake_case ( A__ ): return torch.tensor(A__ ,dtype=torch.long ,device=A__ ) lowerCamelCase_ = 1E-4 @require_torch @require_sentencepiece @require_tokenizers @slow class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 
7_755, 61_904, 39_144, 38, 2]] ) UpperCAmelCase_ : Optional[int] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) UpperCAmelCase_ : str = prepare_mam_aaa_inputs_dict(model.config , lowerCAmelCase_ , lowerCAmelCase_ ) with torch.no_grad(): UpperCAmelCase_ : List[str] = model(**lowerCAmelCase_ )[0] UpperCAmelCase_ : int = torch.Size((1, 11, 1_024) ) self.assertEqual(output.shape , lowerCAmelCase_ ) # change to expected output here UpperCAmelCase_ : int = torch.tensor( [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=lowerCAmelCase_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: UpperCAmelCase_ : str = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowerCAmelCase_ ) # change to intended input UpperCAmelCase_ : int = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] ) UpperCAmelCase_ : Union[str, Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] ) UpperCAmelCase_ : Optional[Any] = prepare_mam_aaa_inputs_dict(model.config , lowerCAmelCase_ , lowerCAmelCase_ ) with torch.no_grad(): UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ )[0] UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 11, model.config.vocab_size) ) self.assertEqual(output.shape , lowerCAmelCase_ ) # change to expected output here UpperCAmelCase_ : Union[str, Any] = torch.tensor( [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=lowerCAmelCase_ ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=lowerCAmelCase_ ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) UpperCAmelCase_ : str = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams UpperCAmelCase_ : int = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="pt" ) UpperCAmelCase_ : List[Any] = model.generate( input_ids=dct["input_ids"].to(lowerCAmelCase_ ) , attention_mask=dct["attention_mask"].to(lowerCAmelCase_ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) UpperCAmelCase_ : List[str] = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. 
surveillance on all" " communications in France.", ] UpperCAmelCase_ : List[str] = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ ) assert generated == expected_en
268
"""simple docstring""" from torch import nn def snake_case ( A__ ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"""Unsupported activation function: {act_fn}""" )
268
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all MVP models at https://huggingface.co/models?filter=mvp lowerCamelCase_ = { '''vocab_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''', }, '''added_tokens.json''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''', }, '''merges_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''', }, } lowerCamelCase_ = { '''RUCAIBox/mvp''': 1024, } class UpperCamelCase_ (__A ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = MvpTokenizer def __init__( self : Dict , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Tuple="replace" , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : List[str]="</s>" , lowerCAmelCase_ : Dict="</s>" , lowerCAmelCase_ : Optional[Any]="<s>" , lowerCAmelCase_ : List[str]="<unk>" , lowerCAmelCase_ : Optional[int]="<pad>" , lowerCAmelCase_ : Optional[int]="<mask>" , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : Union[str, Any]=True , **lowerCAmelCase_ : int , ) -> Optional[Any]: super().__init__( lowerCAmelCase_ , lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , errors=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , lowerCAmelCase_ ) != add_prefix_space: UpperCAmelCase_ : Tuple = getattr(lowerCAmelCase_ , pre_tok_state.pop("type" ) ) UpperCAmelCase_ : Any = add_prefix_space UpperCAmelCase_ : str = pre_tok_class(**lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` UpperCAmelCase_ : Optional[Any] = "post_processor" UpperCAmelCase_ : str = getattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ ) if tokenizer_component_instance: UpperCAmelCase_ : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: UpperCAmelCase_ : str = tuple(state["sep"] ) if "cls" in state: UpperCAmelCase_ : Optional[Any] = tuple(state["cls"] ) UpperCAmelCase_ : List[Any] = False if state.get("add_prefix_space" , lowerCAmelCase_ ) != add_prefix_space: UpperCAmelCase_ : List[str] = add_prefix_space UpperCAmelCase_ : Dict = True if state.get("trim_offsets" , lowerCAmelCase_ ) != trim_offsets: UpperCAmelCase_ : Any 
= trim_offsets UpperCAmelCase_ : Union[str, Any] = True if changes_to_apply: UpperCAmelCase_ : Optional[Any] = getattr(lowerCAmelCase_ , state.pop("type" ) ) UpperCAmelCase_ : Union[str, Any] = component_class(**lowerCAmelCase_ ) setattr(self.backend_tokenizer , lowerCAmelCase_ , lowerCAmelCase_ ) @property def _SCREAMING_SNAKE_CASE ( self : str ) -> str: if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : Optional[Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else value UpperCAmelCase_ : List[str] = value def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : str ) -> BatchEncoding: UpperCAmelCase_ : Optional[int] = kwargs.get("is_split_into_words" , lowerCAmelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Optional[int] ) -> BatchEncoding: UpperCAmelCase_ : List[Any] = kwargs.get("is_split_into_words" , lowerCAmelCase_ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._encode_plus(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: UpperCAmelCase_ : Union[str, Any] = self._tokenizer.model.save(lowerCAmelCase_ , name=lowerCAmelCase_ ) return tuple(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]=None ) -> Optional[int]: UpperCAmelCase_ : List[str] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : int = [self.sep_token_id] UpperCAmelCase_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
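# --- usage sketch (appended; not part of the original file) ---------------------
# A minimal look at the two behaviours the fast tokenizer above special-cases:
# pre-tokenized input requires add_prefix_space=True, and the mask token is
# registered as an AddedToken so surrounding whitespace is handled for the user.
# Class and checkpoint names are assumptions (transformers' MvpTokenizerFast and
# the RUCAIBox/mvp checkpoint referenced in the vocab map above).
from transformers import MvpTokenizerFast

tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp", add_prefix_space=True)

# Without add_prefix_space=True, _batch_encode_plus above raises a ValueError
# when is_split_into_words=True.
enc = tok(["Summarize", "the", "following", "article"], is_split_into_words=True)
print(enc.tokens())

# The mask token can be used directly inside running text.
print(tok("The capital of France is <mask>.").tokens())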
268
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class UpperCamelCase_ : def __init__( self : str ) -> Dict: UpperCAmelCase_ : List[Any] = "" UpperCAmelCase_ : int = "" UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : int = 0 UpperCAmelCase_ : List[Any] = 256 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : List[str] = 0 def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]: UpperCAmelCase_ : Dict = cva.imread(lowerCAmelCase_ , 0 ) UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.img ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) UpperCAmelCase_ : List[Any] = np.sum(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) ): UpperCAmelCase_ : List[Any] = x[i] / self.k self.sk += prk UpperCAmelCase_ : Optional[Any] = (self.L - 1) * self.sk if self.rem != 0: UpperCAmelCase_ : Any = int(last % last ) UpperCAmelCase_ : List[str] = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size ) UpperCAmelCase_ : Dict = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): UpperCAmelCase_ : Any = self.img[j][i] if num != self.last_list[num]: UpperCAmelCase_ : Tuple = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: plt.hist(self.img.ravel() , 256 , [0, 256] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCamelCase_ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') lowerCamelCase_ = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
268
1
"""simple docstring""" def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ): if index == number_of_items: return 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 ) if weights[index] <= max_weight: UpperCAmelCase_ : Union[str, Any] = values[index] + knapsack( A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 ) return max(A__ ,A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ : def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str: UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : str = image_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : Tuple = embeddings_size UpperCAmelCase_ : Union[str, Any] = hidden_sizes UpperCAmelCase_ : int = depths UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : str = num_labels UpperCAmelCase_ : str = scope UpperCAmelCase_ : str = len(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.num_labels UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: UpperCAmelCase_ : Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ : Dict = config_and_inputs UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () __magic_name__ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ): UpperCAmelCase_ : str = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase_ : List[Any] = layer_type UpperCAmelCase_ : int = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True check_hidden_states_output(lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ): UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple() def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ): if isinstance(lowerCAmelCase_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ): recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_ : Union[str, Any] = self.default_image_processor UpperCAmelCase_ : int = prepare_img() 
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" ) # forward pass UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
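# --- condensed inference sketch (appended; not part of the original test) -------
# The slow test above reduces to processor -> model -> logits. The checkpoint
# name is an assumption (first entry of TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST).
import requests
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits            # shape (1, 1000)
predicted = int(logits.numpy().argmax(-1)[0])
print(model.config.id2label[predicted])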
268
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''', '''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class UpperCamelCase_ (__A ): __magic_name__ = '''mobilenet_v1''' def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : Optional[Any]=224 , lowerCAmelCase_ : Any=1.0 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : Optional[int]="relu6" , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : List[str]=0.9_9_9 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : Optional[Any]=0.0_0_1 , **lowerCAmelCase_ : List[Any] , ) -> List[str]: super().__init__(**lowerCAmelCase_ ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : Dict = depth_multiplier UpperCAmelCase_ : List[str] = min_depth UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : Dict = tf_padding UpperCAmelCase_ : List[Any] = classifier_dropout_prob UpperCAmelCase_ : Dict = initializer_range UpperCAmelCase_ : Union[str, Any] = layer_norm_eps class UpperCamelCase_ (__A ): __magic_name__ = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float: return 1e-4
268
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class UpperCamelCase_ : __magic_name__ = '''dummy_data''' __magic_name__ = '''datasets''' __magic_name__ = False def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[Version, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[List[Callable]] = None , ) -> Tuple: UpperCAmelCase_ : Optional[int] = 0 UpperCAmelCase_ : int = dataset_name UpperCAmelCase_ : Optional[int] = cache_dir UpperCAmelCase_ : Tuple = use_local_dummy_data UpperCAmelCase_ : int = config # download_callbacks take a single url as input UpperCAmelCase_ : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root UpperCAmelCase_ : Optional[Any] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general UpperCAmelCase_ : Dict = str(lowerCAmelCase_ ) # to be downloaded UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : int = None @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: if self._dummy_file is None: UpperCAmelCase_ : List[str] = self.download_dummy_data() return self._dummy_file @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: UpperCAmelCase_ : int = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) UpperCAmelCase_ : Union[str, Any] = cached_path( lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_ ) return os.path.join(lowerCAmelCase_ , self.dummy_file_name ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: if self._bucket_url is None: UpperCAmelCase_ : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : List[Any] ) -> Optional[int]: if self.load_existing_dummy_data: # dummy data is downloaded and tested UpperCAmelCase_ : Dict = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned UpperCAmelCase_ : Optional[int] = self.dummy_file_name # special case when data_url is a dict if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_ ) elif isinstance(lowerCAmelCase_ , (list, tuple) ): return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_ ) else: return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , *lowerCAmelCase_ : Union[str, Any] ) -> Any: return self.download_and_extract(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any: return self.download_and_extract(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]: return path def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: return {} def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Dict = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): for single_url in single_urls: download_callback(lowerCAmelCase_ ) else: UpperCAmelCase_ : Tuple = single_urls download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : List[str] = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) for x in single_urls] else: UpperCAmelCase_ : Optional[int] = single_urls UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) UpperCAmelCase_ : int = value # make sure that values are unique if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique UpperCAmelCase_ : List[str] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict: UpperCAmelCase_ : str = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one UpperCAmelCase_ : int = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowerCAmelCase_ ) ) for url in data_url ) UpperCAmelCase_ : Union[str, Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): UpperCAmelCase_ : Tuple = [data_url[0]] * len(lowerCAmelCase_ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url 
has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(lowerCAmelCase_ ) return dummy_data_list def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ) -> Optional[int]: for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(lowerCAmelCase_ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: pass def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]: def _iter_archive_members(lowerCAmelCase_ : Dict ): # this preserves the order of the members inside the ZIP archive UpperCAmelCase_ : str = Path(self.dummy_file ).parent UpperCAmelCase_ : Optional[Any] = path.relative_to(lowerCAmelCase_ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: UpperCAmelCase_ : str = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = _iter_archive_members(lowerCAmelCase_ ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(lowerCAmelCase_ ).as_posix(), file_path.open("rb" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> str: if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : str = [paths] for path in paths: if os.path.isfile(lowerCAmelCase_ ): if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_ ): if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(lowerCAmelCase_ ): if filename.startswith((".", "__") ): continue yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
268
1
"""simple docstring""" def snake_case ( A__ ,A__ = False ): if n == 2: return True if not n % 2 or n < 2: return False if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit return False if n > 3_31_70_44_06_46_79_88_73_85_96_19_81 and not allow_probable: raise ValueError( "Warning: upper bound of deterministic test is exceeded. " "Pass allow_probable=True to allow probabilistic test. " "A return value of True indicates a probable prime." ) # array bounds provided by analysis UpperCAmelCase_ : List[Any] = [ 20_47, 1_37_36_53, 25_32_60_01, 32_15_03_17_51, 2_15_23_02_89_87_47, 3_47_47_49_66_03_83, 3_41_55_00_71_72_83_21, 1, 3_82_51_23_05_65_46_41_30_51, 1, 1, 31_86_65_85_78_34_03_11_51_16_74_61, 3_31_70_44_06_46_79_88_73_85_96_19_81, ] UpperCAmelCase_ : int = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41] for idx, _p in enumerate(A__ ,1 ): if n < _p: # then we have our last prime to check UpperCAmelCase_ : int = primes[:idx] break UpperCAmelCase_ , UpperCAmelCase_ : Tuple = n - 1, 0 # break up n -1 into a power of 2 (s) and # remaining odd component # essentially, solve for d * 2 ** s == n - 1 while d % 2 == 0: d //= 2 s += 1 for prime in plist: UpperCAmelCase_ : Tuple = False for r in range(A__ ): UpperCAmelCase_ : Optional[Any] = pow(A__ ,d * 2**r ,A__ ) # see article for analysis explanation for m if (r == 0 and m == 1) or ((m + 1) % n == 0): UpperCAmelCase_ : str = True # this loop will not determine compositeness break if pr: continue # if pr is False, then the above loop never evaluated to true, # and the n MUST be composite return False return True def snake_case ( ): assert not miller_rabin(5_61 ) assert miller_rabin(5_63 ) # 2047 assert not miller_rabin(83_82_01 ) assert miller_rabin(83_82_07 ) # 1_373_653 assert not miller_rabin(17_31_60_01 ) assert miller_rabin(17_31_60_17 ) # 25_326_001 assert not miller_rabin(30_78_38_66_41 ) assert miller_rabin(30_78_38_66_53 ) # 3_215_031_751 assert not miller_rabin(1_71_30_45_57_48_01 ) assert miller_rabin(1_71_30_45_57_48_19 ) # 2_152_302_898_747 assert not miller_rabin(2_77_97_99_72_83_07 ) assert miller_rabin(2_77_97_99_72_83_27 ) # 3_474_749_660_383 assert not miller_rabin(1_13_85_00_23_90_94_41 ) assert miller_rabin(1_13_85_00_23_90_95_27 ) # 341_550_071_728_321 assert not miller_rabin(1_27_50_41_01_88_48_80_43_51 ) assert miller_rabin(1_27_50_41_01_88_48_80_43_91 ) # 3_825_123_056_546_413_051 assert not miller_rabin(7_96_66_46_44_58_50_77_87_79_18_67 ) assert miller_rabin(7_96_66_46_44_58_50_77_87_79_19_51 ) # 318_665_857_834_031_151_167_461 assert not miller_rabin(55_28_40_67_74_46_64_78_97_66_03_33 ) assert miller_rabin(55_28_40_67_74_46_64_78_97_66_03_59 ) # 3_317_044_064_679_887_385_961_981 # upper limit for probabilistic test if __name__ == "__main__": test_miller_rabin()
268
"""simple docstring""" lowerCamelCase_ = [ (1000, '''M'''), (900, '''CM'''), (500, '''D'''), (400, '''CD'''), (100, '''C'''), (90, '''XC'''), (50, '''L'''), (40, '''XL'''), (10, '''X'''), (9, '''IX'''), (5, '''V'''), (4, '''IV'''), (1, '''I'''), ] def snake_case ( A__ ): UpperCAmelCase_ : List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00} UpperCAmelCase_ : Optional[Any] = 0 UpperCAmelCase_ : Tuple = 0 while place < len(A__ ): if (place + 1 < len(A__ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def snake_case ( A__ ): UpperCAmelCase_ : Union[str, Any] = [] for arabic, roman in ROMAN: ((UpperCAmelCase_) , (UpperCAmelCase_)) : str = divmod(A__ ,A__ ) result.append(roman * factor ) if number == 0: break return "".join(A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import KarrasVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class UpperCamelCase_ (__A ): __magic_name__ = 42 __magic_name__ = 42 def __init__( self : Tuple , lowerCAmelCase_ : UNetaDModel , lowerCAmelCase_ : KarrasVeScheduler ) -> Any: super().__init__() self.register_modules(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ ) @torch.no_grad() def __call__( self : int , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 50 , lowerCAmelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase_ : Optional[str] = "pil" , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : List[Any] , ) -> Union[Tuple, ImagePipelineOutput]: UpperCAmelCase_ : Optional[Any] = self.unet.config.sample_size UpperCAmelCase_ : Optional[int] = (batch_size, 3, img_size, img_size) UpperCAmelCase_ : Union[str, Any] = self.unet # sample x_0 ~ N(0, sigma_0^2 * I) UpperCAmelCase_ : Optional[Any] = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=self.device ) * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(lowerCAmelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # here sigma_t == t_i from the paper UpperCAmelCase_ : Dict = self.scheduler.schedule[t] UpperCAmelCase_ : Any = self.scheduler.schedule[t - 1] if t > 0 else 0 # 1. Select temporarily increased noise level sigma_hat # 2. Add new noise to move from sample_i to sample_hat UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.scheduler.add_noise_to_input(lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_ ) # 3. Predict the noise residual given the noise magnitude `sigma_hat` # The model inputs and output are adjusted by following eq. (213) in [1]. UpperCAmelCase_ : int = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample # 4. Evaluate dx/dt at sigma_hat # 5. Take Euler step from sigma to sigma_prev UpperCAmelCase_ : List[str] = self.scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if sigma_prev != 0: # 6. Apply 2nd order correction # The model inputs and output are adjusted by following eq. (213) in [1]. UpperCAmelCase_ : Tuple = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample UpperCAmelCase_ : Tuple = self.scheduler.step_correct( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , step_output.prev_sample , step_output["derivative"] , ) UpperCAmelCase_ : Dict = step_output.prev_sample UpperCAmelCase_ : List[str] = (sample / 2 + 0.5).clamp(0 , 1 ) UpperCAmelCase_ : int = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCAmelCase_ : List[Any] = self.numpy_to_pil(lowerCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowerCAmelCase_ )
268
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ (__A ): def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="None" , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=None , ) -> Dict: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : int = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[Any] = num_choices UpperCAmelCase_ : List[str] = relative_attention UpperCAmelCase_ : List[Any] = position_biased_input UpperCAmelCase_ : Dict = pos_att_type UpperCAmelCase_ : Optional[Any] = scope def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Tuple = None if self.use_input_mask: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Tuple = 
ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = self.get_config() UpperCAmelCase_ : int = 300 return config def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int ) -> List[Any]: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = DebertaModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = DebertaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> str: UpperCAmelCase_ : Optional[int] = self.num_labels 
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[Any]: UpperCAmelCase_ : Dict = DebertaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Any = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = config_and_inputs UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = DebertaModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] 
) -> Union[str, Any]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ (unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: pass @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained("microsoft/deberta-base" ) UpperCAmelCase_ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) UpperCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. UpperCAmelCase_ : Tuple = torch.tensor( [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
268
1
"""simple docstring""" from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def snake_case ( A__ ): return ConvertCommand( args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name ) lowerCamelCase_ = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class UpperCamelCase_ (__A ): @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : ArgumentParser ) -> List[Any]: UpperCAmelCase_ : Dict = parser.add_parser( "convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , ) train_parser.add_argument("--model_type" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="Model's type." ) train_parser.add_argument( "--tf_checkpoint" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="TensorFlow checkpoint path or folder." ) train_parser.add_argument( "--pytorch_dump_output" , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="Path to the PyTorch saved model output." ) train_parser.add_argument("--config" , type=lowerCAmelCase_ , default="" , help="Configuration file path or folder." ) train_parser.add_argument( "--finetuning_task_name" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help="Optional fine-tuning task name if the TF model was a finetuned model." , ) train_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , *lowerCAmelCase_ : Optional[int] , ) -> Tuple: UpperCAmelCase_ : List[Any] = logging.get_logger("transformers-cli/converting" ) self._logger.info(f"""Loading model {model_type}""" ) UpperCAmelCase_ : Any = model_type UpperCAmelCase_ : Dict = tf_checkpoint UpperCAmelCase_ : Dict = pytorch_dump_output UpperCAmelCase_ : str = config UpperCAmelCase_ : Optional[int] = finetuning_task_name def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(lowerCAmelCase_ ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( 
convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) if "ckpt" in self._tf_checkpoint.lower(): UpperCAmelCase_ : Dict = self._tf_checkpoint UpperCAmelCase_ : Optional[int] = "" else: UpperCAmelCase_ : str = self._tf_checkpoint UpperCAmelCase_ : str = "" convert_transfo_xl_checkpoint_to_pytorch( lowerCAmelCase_ , self._config , self._pytorch_dump_output , lowerCAmelCase_ ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(lowerCAmelCase_ ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
268

"""simple docstring"""

import os


def snake_case ( ):
    with open(os.path.dirname(A__ ) + "/grid.txt" ) as f:
        UpperCAmelCase_ : Any = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(A__ ) for x in f.readline().split()] )
    UpperCAmelCase_ : Any = 0

    # right
    for i in range(20 ):
        for j in range(17 ):
            UpperCAmelCase_ : Union[str, Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                UpperCAmelCase_ : Any = temp

    # down
    for i in range(17 ):
        for j in range(20 ):
            UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                UpperCAmelCase_ : Tuple = temp

    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            UpperCAmelCase_ : str = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                UpperCAmelCase_ : List[str] = temp

    # diagonal 2
    for i in range(17 ):
        for j in range(3 ,20 ):
            UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                UpperCAmelCase_ : List[str] = temp
    return maximum


if __name__ == "__main__":
    print(solution())
268
1
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class UpperCamelCase_ (unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : List[str] = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split() UpperCAmelCase_ : Optional[Any] = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) UpperCAmelCase_ : Any = { "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", } UpperCAmelCase_ : str = { "feature_size": 1, "padding_value": 0.0, "sampling_rate": 16_000, "return_attention_mask": False, "do_normalize": True, } UpperCAmelCase_ : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , lowerCAmelCase_ ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + "\n" ) with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + "\n" ) # load decoder from hub UpperCAmelCase_ : Optional[Any] = "hf-internal-testing/ngram-beam-search-decoder" def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **lowerCAmelCase_ : List[Any] ) -> List[str]: UpperCAmelCase_ : List[str] = self.add_kwargs_tokens_map.copy() kwargs.update(lowerCAmelCase_ ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , **lowerCAmelCase_ : Tuple ) -> Optional[int]: return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowerCAmelCase_ : Optional[int] ) -> int: return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : str ) -> str: UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_feature_extractor() UpperCAmelCase_ : List[str] = self.get_decoder() UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ ) # 
feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , lowerCAmelCase_ ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> str: UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Any: UpperCAmelCase_ : Optional[int] = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["xx"] ) with self.assertRaisesRegex(lowerCAmelCase_ , "include" ): WavaVecaProcessorWithLM( tokenizer=lowerCAmelCase_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : List[str] = self.get_feature_extractor() UpperCAmelCase_ : Dict = self.get_tokenizer() UpperCAmelCase_ : Dict = self.get_decoder() UpperCAmelCase_ : str = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = floats_list((3, 1_000) ) UpperCAmelCase_ : Optional[Any] = feature_extractor(lowerCAmelCase_ , return_tensors="np" ) UpperCAmelCase_ : Tuple = processor(lowerCAmelCase_ , return_tensors="np" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: UpperCAmelCase_ : str = self.get_feature_extractor() UpperCAmelCase_ : Optional[int] = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_decoder() UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ ) UpperCAmelCase_ : int = "This is a test string" UpperCAmelCase_ : Union[str, Any] = processor(text=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer(lowerCAmelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : int=(2, 10, 16) , lowerCAmelCase_ : List[str]=77 ) -> Dict: np.random.seed(lowerCAmelCase_ ) return np.random.rand(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple: UpperCAmelCase_ : List[str] = self.get_feature_extractor() UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : Dict = self.get_decoder() UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self._get_dummy_logits(shape=(10, 16) , seed=13 ) 
UpperCAmelCase_ : Tuple = processor.decode(lowerCAmelCase_ ) UpperCAmelCase_ : int = decoder.decode_beams(lowerCAmelCase_ )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual("</s> <s> </s>" , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ["fork"], ["spawn"]] ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Tuple ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = self.get_feature_extractor() UpperCAmelCase_ : List[str] = self.get_tokenizer() UpperCAmelCase_ : int = self.get_decoder() UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ ) UpperCAmelCase_ : str = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: UpperCAmelCase_ : str = processor.batch_decode(lowerCAmelCase_ ) else: with get_context(lowerCAmelCase_ ).Pool() as pool: UpperCAmelCase_ : Optional[int] = processor.batch_decode(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Dict = list(lowerCAmelCase_ ) with get_context("fork" ).Pool() as p: UpperCAmelCase_ : Union[str, Any] = decoder.decode_beams_batch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(lowerCAmelCase_ , decoded_processor.text ) self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text ) self.assertListEqual(lowerCAmelCase_ , decoded_processor.logit_score ) self.assertListEqual(lowerCAmelCase_ , decoded_processor.lm_score ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: UpperCAmelCase_ : List[Any] = self.get_feature_extractor() UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : Union[str, Any] = self.get_decoder() UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ ) UpperCAmelCase_ : str = self._get_dummy_logits() UpperCAmelCase_ : Tuple = 15 UpperCAmelCase_ : List[str] = -2_0.0 UpperCAmelCase_ : int = -4.0 UpperCAmelCase_ : Union[str, Any] = processor.batch_decode( lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , ) UpperCAmelCase_ : Union[str, Any] = decoded_processor_out.text UpperCAmelCase_ : int = list(lowerCAmelCase_ ) with get_context("fork" ).Pool() as pool: UpperCAmelCase_ : List[Any] = decoder.decode_beams_batch( lowerCAmelCase_ , lowerCAmelCase_ , beam_width=lowerCAmelCase_ , beam_prune_logp=lowerCAmelCase_ , token_min_logp=lowerCAmelCase_ , ) UpperCAmelCase_ : int = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase_ : int = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase_ : Tuple = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , lowerCAmelCase_ ) self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , 
lowerCAmelCase_ , atol=1e-3 ) ) self.assertTrue(np.array_equal(lowerCAmelCase_ , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , lowerCAmelCase_ , atol=1e-3 ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: UpperCAmelCase_ : int = self.get_feature_extractor() UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : Tuple = self.get_decoder() UpperCAmelCase_ : int = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._get_dummy_logits() UpperCAmelCase_ : int = 2.0 UpperCAmelCase_ : Tuple = 5.0 UpperCAmelCase_ : Tuple = -2_0.0 UpperCAmelCase_ : Union[str, Any] = True UpperCAmelCase_ : Optional[Any] = processor.batch_decode( lowerCAmelCase_ , alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , ) UpperCAmelCase_ : Tuple = decoded_processor_out.text UpperCAmelCase_ : Union[str, Any] = list(lowerCAmelCase_ ) decoder.reset_params( alpha=lowerCAmelCase_ , beta=lowerCAmelCase_ , unk_score_offset=lowerCAmelCase_ , lm_score_boundary=lowerCAmelCase_ , ) with get_context("fork" ).Pool() as pool: UpperCAmelCase_ : int = decoder.decode_beams_batch( lowerCAmelCase_ , lowerCAmelCase_ , ) UpperCAmelCase_ : Any = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , lowerCAmelCase_ ) UpperCAmelCase_ : Any = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -2_0.0 ) self.assertEqual(lm_model.score_boundary , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> str: UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase_ : List[str] = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase_ : Optional[Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() UpperCAmelCase_ : str = os.listdir(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = ["alphabet.json", "language_model"] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: UpperCAmelCase_ : str = snapshot_download("hf-internal-testing/processor_with_lm" ) UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase_ : List[Any] = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute() UpperCAmelCase_ : str = os.listdir(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = os.listdir(lowerCAmelCase_ ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase_ : List[str] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase_ : List[Any] = floats_list((3, 1_000) ) UpperCAmelCase_ : Optional[Any] = processor_wavaveca(lowerCAmelCase_ , return_tensors="np" ) UpperCAmelCase_ : Union[str, Any] = processor_auto(lowerCAmelCase_ , return_tensors="np" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) UpperCAmelCase_ : List[Any] = self._get_dummy_logits() UpperCAmelCase_ : List[str] = processor_wavaveca.batch_decode(lowerCAmelCase_ ) UpperCAmelCase_ : int = processor_auto.batch_decode(lowerCAmelCase_ ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ : Dict = self.get_feature_extractor() UpperCAmelCase_ : Any = self.get_tokenizer() UpperCAmelCase_ : List[Any] = self.get_decoder() UpperCAmelCase_ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ , decoder=lowerCAmelCase_ ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , ) @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ) -> List[str]: UpperCAmelCase_ : List[Any] = [d[key] for d in offsets] return retrieved_list def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits()[0] UpperCAmelCase_ : List[str] = processor.decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: UpperCAmelCase_ : int = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" ) UpperCAmelCase_ : Any 
= self._get_dummy_logits() UpperCAmelCase_ : Union[str, Any] = processor.batch_decode(lowerCAmelCase_ , output_word_offsets=lowerCAmelCase_ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("text" in outputs ) self.assertTrue("word_offsets" in outputs ) self.assertTrue(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertListEqual( [" ".join(self.get_from_offsets(lowerCAmelCase_ , "word" ) ) for o in outputs["word_offsets"]] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: import torch UpperCAmelCase_ : Union[str, Any] = load_dataset("common_voice" , "en" , split="train" , streaming=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = ds.cast_column("audio" , datasets.Audio(sampling_rate=16_000 ) ) UpperCAmelCase_ : Optional[Any] = iter(lowerCAmelCase_ ) UpperCAmelCase_ : Any = next(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) UpperCAmelCase_ : List[Any] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase_ : int = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ ).logits.cpu().numpy() UpperCAmelCase_ : Any = processor.decode(logits[0] , output_word_offsets=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase_ : str = [ { "start_time": d["start_offset"] * time_offset, "end_time": d["end_offset"] * time_offset, "word": d["word"], } for d in output["word_offsets"] ] UpperCAmelCase_ : Optional[int] = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL" # output words self.assertEqual(" ".join(self.get_from_offsets(lowerCAmelCase_ , "word" ) ) , lowerCAmelCase_ ) self.assertEqual(" ".join(self.get_from_offsets(lowerCAmelCase_ , "word" ) ) , output.text ) # output times UpperCAmelCase_ : Tuple = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , "start_time" ) ) UpperCAmelCase_ : List[Any] = torch.tensor(self.get_from_offsets(lowerCAmelCase_ , "end_time" ) ) # fmt: off UpperCAmelCase_ : List[Any] = torch.tensor([1.4_1_9_9, 1.6_5_9_9, 2.2_5_9_9, 3.0, 3.2_4, 3.5_9_9_9, 3.7_9_9_9, 4.0_9_9_9, 4.2_6, 4.9_4, 5.2_8, 5.6_5_9_9, 5.7_8, 5.9_4, 6.3_2, 6.5_3_9_9, 6.6_5_9_9] ) UpperCAmelCase_ : Union[str, Any] = torch.tensor([1.5_3_9_9, 1.8_9_9_9, 2.9, 3.1_6, 3.5_3_9_9, 3.7_2, 4.0_1_9_9, 4.1_7_9_9, 4.7_6, 5.1_5_9_9, 5.5_5_9_9, 5.6_9_9_9, 5.8_6, 6.1_9_9_9, 6.3_8, 6.6_1_9_9, 6.9_4] ) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.0_1 ) ) self.assertTrue(torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=0.0_1 ) )
268

"""simple docstring"""

import argparse

import requests
import torch
from PIL import Image

from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor


def snake_case ( A__ ):
    UpperCAmelCase_ : Dict = SwinConfig(image_size=1_92 )

    if "base" in model_name:
        UpperCAmelCase_ : Any = 6
        UpperCAmelCase_ : Optional[Any] = 1_28
        UpperCAmelCase_ : Optional[int] = (2, 2, 18, 2)
        UpperCAmelCase_ : List[str] = (4, 8, 16, 32)
    elif "large" in model_name:
        UpperCAmelCase_ : Dict = 12
        UpperCAmelCase_ : int = 1_92
        UpperCAmelCase_ : List[Any] = (2, 2, 18, 2)
        UpperCAmelCase_ : int = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants" )

    UpperCAmelCase_ : str = window_size
    UpperCAmelCase_ : Any = embed_dim
    UpperCAmelCase_ : int = depths
    UpperCAmelCase_ : Any = num_heads

    return config


def snake_case ( A__ ):
    if "encoder.mask_token" in name:
        UpperCAmelCase_ : str = name.replace("encoder.mask_token" ,"embeddings.mask_token" )
    if "encoder.patch_embed.proj" in name:
        UpperCAmelCase_ : Optional[int] = name.replace("encoder.patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
    if "encoder.patch_embed.norm" in name:
        UpperCAmelCase_ : List[str] = name.replace("encoder.patch_embed.norm" ,"embeddings.norm" )
    if "attn.proj" in name:
        UpperCAmelCase_ : Optional[Any] = name.replace("attn.proj" ,"attention.output.dense" )
    if "attn" in name:
        UpperCAmelCase_ : Any = name.replace("attn" ,"attention.self" )
    if "norm1" in name:
        UpperCAmelCase_ : str = name.replace("norm1" ,"layernorm_before" )
    if "norm2" in name:
        UpperCAmelCase_ : Tuple = name.replace("norm2" ,"layernorm_after" )
    if "mlp.fc1" in name:
        UpperCAmelCase_ : List[str] = name.replace("mlp.fc1" ,"intermediate.dense" )
    if "mlp.fc2" in name:
        UpperCAmelCase_ : str = name.replace("mlp.fc2" ,"output.dense" )

    if name == "encoder.norm.weight":
        UpperCAmelCase_ : List[str] = "layernorm.weight"
    if name == "encoder.norm.bias":
        UpperCAmelCase_ : int = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        UpperCAmelCase_ : Any = "swin." + name

    return name


def snake_case ( A__ ,A__ ):
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase_ : Tuple = orig_state_dict.pop(A__ )

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            UpperCAmelCase_ : Optional[int] = key.split("." )
            UpperCAmelCase_ : str = int(key_split[2] )
            UpperCAmelCase_ : Union[str, Any] = int(key_split[4] )
            UpperCAmelCase_ : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                UpperCAmelCase_ : List[Any] = val[:dim, :]
                UpperCAmelCase_ : str = val[ dim : dim * 2, : ]
                UpperCAmelCase_ : str = val[-dim:, :]
            else:
                UpperCAmelCase_ : List[str] = val[ :dim ]
                UpperCAmelCase_ : str = val[ dim : dim * 2 ]
                UpperCAmelCase_ : Optional[Any] = val[ -dim: ]
        else:
            UpperCAmelCase_ : Tuple = val

    return orig_state_dict


def snake_case ( A__ ,A__ ,A__ ,A__ ):
    UpperCAmelCase_ : List[Any] = torch.load(A__ ,map_location="cpu" )["model"]

    UpperCAmelCase_ : Optional[Any] = get_swin_config(A__ )
    UpperCAmelCase_ : List[Any] = SwinForMaskedImageModeling(A__ )
    model.eval()

    UpperCAmelCase_ : str = convert_state_dict(A__ ,A__ )
    model.load_state_dict(A__ )

    UpperCAmelCase_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg"

    UpperCAmelCase_ : int = ViTImageProcessor(size={"height": 1_92, "width": 1_92} )
    UpperCAmelCase_ : Any = Image.open(requests.get(A__ ,stream=A__ ).raw )
    UpperCAmelCase_ : Any = image_processor(images=A__ ,return_tensors="pt" )

    with torch.no_grad():
        UpperCAmelCase_ : List[Any] = model(**A__ ).logits

    print(outputs.keys() )
    print("Looks ok!" )

    if pytorch_dump_folder_path is not None:
        print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(A__ )

        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(A__ )

    if push_to_hub:
        print(F"""Pushing model and image processor for {model_name} to hub""" )
        model.push_to_hub(F"""microsoft/{model_name}""" )
        image_processor.push_to_hub(F"""microsoft/{model_name}""" )


if __name__ == "__main__":
    lowerCamelCase_ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''swin-base-simmim-window6-192''',
        type=str,
        choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
        help='''Name of the Swin SimMIM model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--checkpoint_path''',
        default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
        type=str,
        help='''Path to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )

    lowerCamelCase_ = parser.parse_args()
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
268
1
"""simple docstring""" import unittest from transformers import BertGenerationTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCamelCase_ = '''▁''' lowerCamelCase_ = get_tests_dir('''fixtures/test_sentencepiece.model''') @require_sentencepiece class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = BertGenerationTokenizer __magic_name__ = False __magic_name__ = True def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: super().setUp() UpperCAmelCase_ : List[Any] = BertGenerationTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = "<s>" UpperCAmelCase_ : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: UpperCAmelCase_ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "<pad>" ) self.assertEqual(len(lowerCAmelCase_ ) , 1_002 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: self.assertEqual(self.get_tokenizer().vocab_size , 1_000 ) def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: UpperCAmelCase_ : Union[str, Any] = BertGenerationTokenizer(lowerCAmelCase_ , keep_accents=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = tokenizer.tokenize("This is a test" ) self.assertListEqual(lowerCAmelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [285, 46, 10, 170, 382] , ) UpperCAmelCase_ : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) UpperCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , ) UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ ) self.assertListEqual( lowerCAmelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = "Hello World!" 
UpperCAmelCase_ : int = [18_536, 2_260, 101] self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: UpperCAmelCase_ : Optional[int] = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will" " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth" ) UpperCAmelCase_ : int = [ 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, ] self.assertListEqual(lowerCAmelCase_ , self.big_tokenizer.encode(lowerCAmelCase_ ) ) @require_torch @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]: import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence UpperCAmelCase_ : List[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10] UpperCAmelCase_ : str = " ".join(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = self.big_tokenizer.encode_plus(lowerCAmelCase_ , return_tensors="pt" , return_token_type_ids=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowerCAmelCase_ ) UpperCAmelCase_ : Any = BertGenerationConfig() UpperCAmelCase_ : List[Any] = BertGenerationEncoder(lowerCAmelCase_ ) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**lowerCAmelCase_ ) model(**lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: # fmt: off UpperCAmelCase_ : Dict = {"input_ids": [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase_ , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
268

"""simple docstring"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCamelCase_ = logging.get_logger(__name__)

lowerCamelCase_ = {
    '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
    '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
    '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
    '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}


class UpperCamelCase_ (__A ):
    __magic_name__ = '''rwkv'''
    __magic_name__ = {'''max_position_embeddings''': '''context_length'''}

    def __init__( self : str , lowerCAmelCase_ : str=50_277 , lowerCAmelCase_ : Optional[int]=1_024 , lowerCAmelCase_ : Optional[int]=4_096 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : List[Any] , ) -> List[str]:
        UpperCAmelCase_ : Tuple = vocab_size
        UpperCAmelCase_ : List[str] = context_length
        UpperCAmelCase_ : Dict = hidden_size
        UpperCAmelCase_ : Optional[int] = num_hidden_layers
        UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size
        UpperCAmelCase_ : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size
        UpperCAmelCase_ : Any = layer_norm_epsilon
        UpperCAmelCase_ : List[Any] = rescale_every
        UpperCAmelCase_ : List[str] = use_cache

        UpperCAmelCase_ : List[str] = bos_token_id
        UpperCAmelCase_ : Union[str, Any] = eos_token_id

        super().__init__(
            tie_word_embeddings=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
268
1

"""simple docstring"""

from typing import Optional

from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


class UpperCamelCase_ (__A ):
    def __init__( self : Union[str, Any] , lowerCAmelCase_ : NestedDataStructureLike[PathLike] , lowerCAmelCase_ : Optional[NamedSplit] = None , lowerCAmelCase_ : Optional[Features] = None , lowerCAmelCase_ : str = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[int] = None , **lowerCAmelCase_ : List[Any] , ) -> List[str]:
        super().__init__(
            lowerCAmelCase_ , split=lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , streaming=lowerCAmelCase_ , num_proc=lowerCAmelCase_ , **lowerCAmelCase_ , )
        UpperCAmelCase_ : List[Any] = path_or_paths if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else {self.split: path_or_paths}
        UpperCAmelCase_ : List[Any] = Text(
            cache_dir=lowerCAmelCase_ , data_files=lowerCAmelCase_ , features=lowerCAmelCase_ , **lowerCAmelCase_ , )

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
        # Build iterable dataset
        if self.streaming:
            UpperCAmelCase_ : str = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            UpperCAmelCase_ : Any = None
            UpperCAmelCase_ : int = None
            UpperCAmelCase_ : int = None
            UpperCAmelCase_ : Any = None
            self.builder.download_and_prepare(
                download_config=lowerCAmelCase_ , download_mode=lowerCAmelCase_ , verification_mode=lowerCAmelCase_ , base_path=lowerCAmelCase_ , num_proc=self.num_proc , )
            UpperCAmelCase_ : List[str] = self.builder.as_dataset(
                split=self.split , verification_mode=lowerCAmelCase_ , in_memory=self.keep_in_memory )
        return dataset
268

"""simple docstring"""

import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    '''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
    ''' StableDiffusionInpaintPipeline` instead.'''
)
268
1
"""simple docstring""" import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class UpperCamelCase_ (__A , __A , __A , unittest.TestCase ): __magic_name__ = StableUnCLIPPipeline __magic_name__ = TEXT_TO_IMAGE_PARAMS __magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS __magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS __magic_name__ = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: UpperCAmelCase_ : Tuple = 32 UpperCAmelCase_ : int = embedder_hidden_size # prior components torch.manual_seed(0 ) UpperCAmelCase_ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=lowerCAmelCase_ , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) UpperCAmelCase_ : Tuple = PriorTransformer( num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowerCAmelCase_ , num_layers=1 , ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = DDPMScheduler( variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=lowerCAmelCase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , ) # regular denoising components torch.manual_seed(0 ) UpperCAmelCase_ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) UpperCAmelCase_ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) UpperCAmelCase_ : Union[str, Any] = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) ) torch.manual_seed(0 ) UpperCAmelCase_ : Any = UNetaDConditionModel( sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase_ , layers_per_block=1 , upcast_attention=lowerCAmelCase_ , use_linear_projection=lowerCAmelCase_ , ) torch.manual_seed(0 ) UpperCAmelCase_ : List[Any] = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type="v_prediction" , 
set_alpha_to_one=lowerCAmelCase_ , steps_offset=1 , ) torch.manual_seed(0 ) UpperCAmelCase_ : Any = AutoencoderKL() UpperCAmelCase_ : List[str] = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int]=0 ) -> Tuple: if str(lowerCAmelCase_ ).startswith("mps" ): UpperCAmelCase_ : Any = torch.manual_seed(lowerCAmelCase_ ) else: UpperCAmelCase_ : Union[str, Any] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: UpperCAmelCase_ : Union[str, Any] = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: UpperCAmelCase_ : List[str] = torch_device in ["cpu", "mps"] self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase_ ) @slow @require_torch_gpu class UpperCamelCase_ (unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) UpperCAmelCase_ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase_ : Optional[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase_ : Optional[Any] = pipe("anime turle" , generator=lowerCAmelCase_ , output_type="np" ) UpperCAmelCase_ : Optional[Any] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase_ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa ) UpperCAmelCase_ : Optional[int] = pipe.to(lowerCAmelCase_ ) pipe.set_progress_bar_config(disable=lowerCAmelCase_ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase_ : str = pipe( "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase_ : Dict = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
268
"""simple docstring""" from __future__ import annotations class UpperCamelCase_ : def __init__( self : Any , lowerCAmelCase_ : int ) -> None: UpperCAmelCase_ : Any = data UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def snake_case ( A__ ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def snake_case ( A__ ): return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0 def snake_case ( A__ ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def snake_case ( ): # Main function for testing. UpperCAmelCase_ : List[str] = Node(1 ) UpperCAmelCase_ : Any = Node(2 ) UpperCAmelCase_ : Optional[Any] = Node(3 ) UpperCAmelCase_ : Union[str, Any] = Node(4 ) UpperCAmelCase_ : int = Node(5 ) UpperCAmelCase_ : Optional[int] = Node(6 ) UpperCAmelCase_ : Any = Node(7 ) UpperCAmelCase_ : List[str] = Node(8 ) UpperCAmelCase_ : List[Any] = Node(9 ) print(is_full_binary_tree(A__ ) ) print(depth_of_tree(A__ ) ) print("Tree is: " ) display(A__ ) if __name__ == "__main__": main()
268
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class UpperCamelCase_ (__A ): __magic_name__ = '''Salesforce/blip-image-captioning-base''' __magic_name__ = ( '''This is a tool that generates a description of an image. It takes an input named `image` which should be the ''' '''image to caption, and returns a text that contains the description in English.''' ) __magic_name__ = '''image_captioner''' __magic_name__ = AutoModelForVisionaSeq __magic_name__ = ['''image'''] __magic_name__ = ['''text'''] def __init__( self : Dict , *lowerCAmelCase_ : str , **lowerCAmelCase_ : Tuple ) -> str: requires_backends(self , ["vision"] ) super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : "Image" ) -> Union[str, Any]: return self.pre_processor(images=lowerCAmelCase_ , return_tensors="pt" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> Any: return self.model.generate(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Union[str, Any] ) -> str: return self.pre_processor.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )[0].strip()
268
"""simple docstring""" def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ): if index == number_of_items: return 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 ) if weights[index] <= max_weight: UpperCAmelCase_ : Union[str, Any] = values[index] + knapsack( A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 ) return max(A__ ,A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
1
"""simple docstring""" import math def snake_case ( A__ = 1_00 ): UpperCAmelCase_ : int = sum(i * i for i in range(1 ,n + 1 ) ) UpperCAmelCase_ : Union[str, Any] = int(math.pow(sum(range(1 ,n + 1 ) ) ,2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f'{solution() = }')
268
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ (__A ): def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = proj_size UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ ) UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size ) UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] ) UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class UpperCamelCase_ (nn.Module ): def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple: super().__init__() UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5 UpperCAmelCase_ : Optional[Any] = config.hidden_size UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) ] ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str: for block in self.blocks: UpperCAmelCase_ : int = block(lowerCAmelCase_ ) return hidden_states
268
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available lowerCamelCase_ = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''MLukeTokenizer'''] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
268
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(__A ) class UpperCamelCase_ (__A ): def __init__( self : int , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int]=None ) -> List[Any]: UpperCAmelCase_ : str = {} if top_k is not None: UpperCAmelCase_ : List[str] = top_k return {}, {}, postprocess_params def __call__( self : str , lowerCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase_ : Any ) -> Tuple: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str ) -> Any: UpperCAmelCase_ : Tuple = load_image(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) return model_inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : Any = self.model(**lowerCAmelCase_ ) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=5 ) -> Any: if top_k > self.model.config.num_labels: UpperCAmelCase_ : int = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ : str = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(lowerCAmelCase_ ) elif self.framework == "tf": UpperCAmelCase_ : str = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase_ : Union[str, Any] = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCAmelCase_ : int = scores.tolist() UpperCAmelCase_ : Optional[Any] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
268
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowerCamelCase_ = { '''configuration_layoutlmv3''': [ '''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv3Config''', '''LayoutLMv3OnnxConfig''', ], '''processing_layoutlmv3''': ['''LayoutLMv3Processor'''], '''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''LayoutLMv3TokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LayoutLMv3ForQuestionAnswering''', '''LayoutLMv3ForSequenceClassification''', '''LayoutLMv3ForTokenClassification''', '''LayoutLMv3Model''', '''LayoutLMv3PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLayoutLMv3ForQuestionAnswering''', '''TFLayoutLMv3ForSequenceClassification''', '''TFLayoutLMv3ForTokenClassification''', '''TFLayoutLMv3Model''', '''TFLayoutLMv3PreTrainedModel''', ] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ['''LayoutLMv3FeatureExtractor'''] lowerCamelCase_ = ['''LayoutLMv3ImageProcessor'''] if TYPE_CHECKING: from .configuration_layoutlmva import ( LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig, LayoutLMvaOnnxConfig, ) from .processing_layoutlmva import LayoutLMvaProcessor from .tokenization_layoutlmva import LayoutLMvaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_layoutlmva import ( LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, LayoutLMvaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_layoutlmva import ( TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMvaForQuestionAnswering, TFLayoutLMvaForSequenceClassification, TFLayoutLMvaForTokenClassification, TFLayoutLMvaModel, TFLayoutLMvaPreTrainedModel, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor from .image_processing_layoutlmva import LayoutLMvaImageProcessor else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
268
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class UpperCamelCase_ (__A ): __magic_name__ = '''detr''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Dict , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=100 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : str="resnet50" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) UpperCAmelCase_ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Dict = backbone_config.get("model_type" ) UpperCAmelCase_ : Dict = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Tuple = config_class.from_dict(lowerCAmelCase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : str = use_timm_backbone UpperCAmelCase_ : Optional[Any] = backbone_config UpperCAmelCase_ : Tuple = num_channels UpperCAmelCase_ : Dict = num_queries UpperCAmelCase_ : str = d_model UpperCAmelCase_ : Any = encoder_ffn_dim UpperCAmelCase_ : Union[str, Any] = encoder_layers UpperCAmelCase_ : Optional[int] = encoder_attention_heads UpperCAmelCase_ : List[str] = decoder_ffn_dim UpperCAmelCase_ : Tuple = decoder_layers UpperCAmelCase_ : Optional[int] = decoder_attention_heads UpperCAmelCase_ : List[Any] = dropout UpperCAmelCase_ : Union[str, Any] = attention_dropout UpperCAmelCase_ : int = activation_dropout UpperCAmelCase_ : List[str] = activation_function UpperCAmelCase_ : Optional[int] = init_std UpperCAmelCase_ : Union[str, Any] = init_xavier_std UpperCAmelCase_ : List[str] = encoder_layerdrop UpperCAmelCase_ : Tuple = decoder_layerdrop UpperCAmelCase_ : str = encoder_layers UpperCAmelCase_ : Any = auxiliary_loss UpperCAmelCase_ : Optional[int] = position_embedding_type UpperCAmelCase_ : List[str] = backbone UpperCAmelCase_ : int = use_pretrained_backbone UpperCAmelCase_ : Any = dilation # Hungarian matcher UpperCAmelCase_ : str = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : List[str] = mask_loss_coefficient UpperCAmelCase_ : Dict = dice_loss_coefficient UpperCAmelCase_ : Any = bbox_loss_coefficient UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient UpperCAmelCase_ : int = eos_coefficient super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.encoder_attention_heads @property def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return self.d_model @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple ) -> List[Any]: return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, any]: UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : Any = self.__class__.model_type return output class UpperCamelCase_ (__A ): __magic_name__ = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float: return 1e-5 @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return 12
268
1
"""simple docstring""" from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. lowerCamelCase_ = 10 def snake_case ( A__ ,A__ ,A__ ,A__ ): for i in range(A__ ,A__ ): if array[i] == target: return i return -1 def snake_case ( A__ ,A__ ): UpperCAmelCase_ : List[str] = 0 UpperCAmelCase_ : Union[str, Any] = len(A__ ) while left <= right: if right - left < precision: return lin_search(A__ ,A__ ,A__ ,A__ ) UpperCAmelCase_ : str = (left + right) // 3 + 1 UpperCAmelCase_ : str = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: UpperCAmelCase_ : Optional[Any] = one_third - 1 elif array[two_third] < target: UpperCAmelCase_ : int = two_third + 1 else: UpperCAmelCase_ : Any = one_third + 1 UpperCAmelCase_ : List[str] = two_third - 1 else: return -1 def snake_case ( A__ ,A__ ,A__ ,A__ ): if left < right: if right - left < precision: return lin_search(A__ ,A__ ,A__ ,A__ ) UpperCAmelCase_ : str = (left + right) // 3 + 1 UpperCAmelCase_ : Union[str, Any] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(A__ ,one_third - 1 ,A__ ,A__ ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 ,A__ ,A__ ,A__ ) else: return rec_ternary_search(one_third + 1 ,two_third - 1 ,A__ ,A__ ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase_ = input('''Enter numbers separated by comma:\n''').strip() lowerCamelCase_ = [int(item.strip()) for item in user_input.split(''',''')] assert collection == sorted(collection), f"List must be ordered.\n{collection}." lowerCamelCase_ = int(input('''Enter the number to be found in the list:\n''').strip()) lowerCamelCase_ = ite_ternary_search(collection, target) lowerCamelCase_ = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(f'Iterative search: {target} found at positions: {resulta}') print(f'Recursive search: {target} found at positions: {resulta}') else: print('''Not found''')
268
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = BertTokenizer __magic_name__ = BertTokenizerFast __magic_name__ = True __magic_name__ = True __magic_name__ = filter_non_english def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running" UpperCAmelCase_ : Any = "unwanted, running" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: if not self.test_rust_tokenizer: return UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = 
rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: UpperCAmelCase_ : Optional[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = BasicTokenizer() UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't." 
UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] UpperCAmelCase_ : Tuple = {} for i, token in enumerate(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = i UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , ) UpperCAmelCase_ : Optional[int] = 
tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False UpperCAmelCase_ : List[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = ["的", "人", "有"] UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". UpperCAmelCase_ : Tuple = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
268
1
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Dict = 0 if start < end: UpperCAmelCase_ : Any = randint(A__ ,A__ ) UpperCAmelCase_ : Union[str, Any] = a[end] UpperCAmelCase_ : List[str] = a[pivot] UpperCAmelCase_ : str = temp UpperCAmelCase_ , UpperCAmelCase_ : str = _in_place_partition(A__ ,A__ ,A__ ) count += _in_place_quick_sort(A__ ,A__ ,p - 1 ) count += _in_place_quick_sort(A__ ,p + 1 ,A__ ) return count def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : int = 0 UpperCAmelCase_ : Dict = randint(A__ ,A__ ) UpperCAmelCase_ : Tuple = a[end] UpperCAmelCase_ : Optional[int] = a[pivot] UpperCAmelCase_ : Union[str, Any] = temp UpperCAmelCase_ : Union[str, Any] = start - 1 for index in range(A__ ,A__ ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value UpperCAmelCase_ : Tuple = new_pivot_index + 1 UpperCAmelCase_ : Dict = a[new_pivot_index] UpperCAmelCase_ : List[Any] = a[index] UpperCAmelCase_ : Union[str, Any] = temp UpperCAmelCase_ : Any = a[new_pivot_index + 1] UpperCAmelCase_ : Optional[int] = a[end] UpperCAmelCase_ : Union[str, Any] = temp return new_pivot_index + 1, count lowerCamelCase_ = TemporaryFile() lowerCamelCase_ = 100 # 1000 elements are to be sorted lowerCamelCase_ , lowerCamelCase_ = 0, 1 # mean and standard deviation lowerCamelCase_ = np.random.normal(mu, sigma, p) np.save(outfile, X) print('''The array is''') print(X) outfile.seek(0) # using the same array lowerCamelCase_ = np.load(outfile) lowerCamelCase_ = len(M) - 1 lowerCamelCase_ = _in_place_quick_sort(M, 0, r) print( '''No of Comparisons for 100 elements selected from a standard normal distribution''' '''is :''' ) print(z)
268
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''t5''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int: UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[Any] = d_model UpperCAmelCase_ : str = d_kv UpperCAmelCase_ : Any = d_ff UpperCAmelCase_ : int = num_layers UpperCAmelCase_ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : Optional[Any] = num_heads UpperCAmelCase_ : Any = relative_attention_num_buckets UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance UpperCAmelCase_ : Optional[Any] = dropout_rate UpperCAmelCase_ : Tuple = layer_norm_epsilon UpperCAmelCase_ : int = initializer_factor UpperCAmelCase_ : int = feed_forward_proj UpperCAmelCase_ : str = use_cache UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" ) UpperCAmelCase_ : List[Any] = act_info[-1] UpperCAmelCase_ : Optional[int] = act_info[0] == "gated" if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : int = "gelu_new" super().__init__( pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , ) class UpperCamelCase_ (__A ): @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : Any = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence" UpperCAmelCase_ : Union[str, Any] = {0: "batch"} UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" ) return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: return 13
268
1
"""simple docstring""" import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEmbeddings, BertLayer, BertPooler, BertPreTrainedModel, ) def snake_case ( A__ ): UpperCAmelCase_ : Tuple = torch.exp(A__ ) UpperCAmelCase_ : str = torch.sum(A__ ,dim=1 ) # sum of exp(x_i) UpperCAmelCase_ : Optional[int] = torch.sum(x * exp_x ,dim=1 ) # sum of x_i * exp(x_i) return torch.log(A__ ) - B / A class UpperCamelCase_ (nn.Module ): def __init__( self : List[Any] , lowerCAmelCase_ : Dict ) -> Union[str, Any]: super().__init__() UpperCAmelCase_ : int = config.output_attentions UpperCAmelCase_ : int = config.output_hidden_states UpperCAmelCase_ : Dict = nn.ModuleList([BertLayer(lowerCAmelCase_ ) for _ in range(config.num_hidden_layers )] ) UpperCAmelCase_ : Tuple = nn.ModuleList([BertHighway(lowerCAmelCase_ ) for _ in range(config.num_hidden_layers )] ) UpperCAmelCase_ : List[Any] = [-1 for _ in range(config.num_hidden_layers )] def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Any ) -> Dict: if (type(lowerCAmelCase_ ) is float) or (type(lowerCAmelCase_ ) is int): for i in range(len(self.early_exit_entropy ) ): UpperCAmelCase_ : Dict = x else: UpperCAmelCase_ : Any = x def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple ) -> Tuple: UpperCAmelCase_ : Tuple = pooler.state_dict() for highway in self.highway: for name, param in highway.pooler.state_dict().items(): param.copy_(loaded_model[name] ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Tuple=None , ) -> str: UpperCAmelCase_ : Tuple = () UpperCAmelCase_ : Tuple = () UpperCAmelCase_ : int = () for i, layer_module in enumerate(self.layer ): if self.output_hidden_states: UpperCAmelCase_ : List[str] = all_hidden_states + (hidden_states,) UpperCAmelCase_ : int = layer_module( lowerCAmelCase_ , lowerCAmelCase_ , head_mask[i] , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = layer_outputs[0] if self.output_attentions: UpperCAmelCase_ : str = all_attentions + (layer_outputs[1],) UpperCAmelCase_ : Union[str, Any] = (hidden_states,) if self.output_hidden_states: UpperCAmelCase_ : List[Any] = current_outputs + (all_hidden_states,) if self.output_attentions: UpperCAmelCase_ : Tuple = current_outputs + (all_attentions,) UpperCAmelCase_ : int = self.highway[i](lowerCAmelCase_ ) # logits, pooled_output if not self.training: UpperCAmelCase_ : int = highway_exit[0] UpperCAmelCase_ : Dict = entropy(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy UpperCAmelCase_ : List[str] = all_highway_exits + (highway_exit,) if highway_entropy < self.early_exit_entropy[i]: UpperCAmelCase_ : Union[str, Any] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,) raise HighwayException(lowerCAmelCase_ , i + 1 ) else: UpperCAmelCase_ : Any = all_highway_exits + (highway_exit,) # Add last layer if self.output_hidden_states: UpperCAmelCase_ : str = all_hidden_states + (hidden_states,) UpperCAmelCase_ : Optional[int] = (hidden_states,) if self.output_hidden_states: UpperCAmelCase_ : str = outputs + (all_hidden_states,) if self.output_attentions: UpperCAmelCase_ : str = outputs + 
(all_attentions,) UpperCAmelCase_ : List[str] = outputs + (all_highway_exits,) return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits @add_start_docstrings( '''The Bert Model transformer with early exiting (DeeBERT). ''' , __A , ) class UpperCamelCase_ (__A ): def __init__( self : Tuple , lowerCAmelCase_ : Optional[int] ) -> List[str]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : int = config UpperCAmelCase_ : Optional[int] = BertEmbeddings(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = DeeBertEncoder(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = BertPooler(lowerCAmelCase_ ) self.init_weights() def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: self.encoder.init_highway_pooler(self.pooler ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: return self.embeddings.word_embeddings def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> Any: UpperCAmelCase_ : Optional[Any] = value def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Optional[int] ) -> List[Any]: for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(lowerCAmelCase_ ) @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : int=None , ) -> Optional[int]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: UpperCAmelCase_ : Optional[int] = input_ids.size() elif inputs_embeds is not None: UpperCAmelCase_ : Tuple = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds" ) UpperCAmelCase_ : Any = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: UpperCAmelCase_ : int = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_ ) if encoder_attention_mask is None: UpperCAmelCase_ : Dict = torch.ones(lowerCAmelCase_ , device=lowerCAmelCase_ ) if token_type_ids is None: UpperCAmelCase_ : Any = torch.zeros(lowerCAmelCase_ , dtype=torch.long , device=lowerCAmelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
UpperCAmelCase_ : torch.Tensor = self.get_extended_attention_mask(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_attention_mask.dim() == 3: UpperCAmelCase_ : List[Any] = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.dim() == 2: UpperCAmelCase_ : List[Any] = encoder_attention_mask[:, None, None, :] UpperCAmelCase_ : Tuple = encoder_extended_attention_mask.to( dtype=next(self.parameters() ).dtype ) # fp16 compatibility UpperCAmelCase_ : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0 # Prepare head mask if needed # 1.0 in head_mask indicates we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] UpperCAmelCase_ : List[Any] = self.get_head_mask(lowerCAmelCase_ , self.config.num_hidden_layers ) UpperCAmelCase_ : List[Any] = self.embeddings( input_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_ ) UpperCAmelCase_ : str = self.encoder( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , ) UpperCAmelCase_ : Any = encoder_outputs[0] UpperCAmelCase_ : Dict = self.pooler(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = ( sequence_output, pooled_output, ) + encoder_outputs[ 1: ] # add hidden_states and attentions if they are here return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits class UpperCamelCase_ (__A ): def __init__( self : List[str] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : List[str] = message UpperCAmelCase_ : Any = exit_layer # start from 1! class UpperCamelCase_ (nn.Module ): def __init__( self : str , lowerCAmelCase_ : Tuple ) -> Dict: super().__init__() UpperCAmelCase_ : Optional[int] = BertPooler(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob ) UpperCAmelCase_ : Tuple = nn.Linear(config.hidden_size , config.num_labels ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] ) -> Optional[int]: # Pooler UpperCAmelCase_ : Optional[int] = encoder_outputs[0] UpperCAmelCase_ : Any = self.pooler(lowerCAmelCase_ ) # "return" pooler_output # BertModel UpperCAmelCase_ : Any = (pooler_input, pooler_output) + encoder_outputs[1:] # "return" bmodel_output # Dropout and classification UpperCAmelCase_ : Tuple = bmodel_output[1] UpperCAmelCase_ : Union[str, Any] = self.dropout(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.classifier(lowerCAmelCase_ ) return logits, pooled_output @add_start_docstrings( '''Bert Model (with early exiting - DeeBERT) with a classifier on top, also takes care of multi-layer training. 
''' , __A , ) class UpperCamelCase_ (__A ): def __init__( self : List[Any] , lowerCAmelCase_ : List[str] ) -> List[Any]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : int = config.num_labels UpperCAmelCase_ : List[Any] = config.num_hidden_layers UpperCAmelCase_ : Dict = DeeBertModel(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob ) UpperCAmelCase_ : Dict = nn.Linear(config.hidden_size , self.config.num_labels ) self.init_weights() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[Any]=-1 , lowerCAmelCase_ : List[str]=False , ) -> Tuple: UpperCAmelCase_ : Dict = self.num_layers try: UpperCAmelCase_ : Union[str, Any] = self.bert( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , position_ids=lowerCAmelCase_ , head_mask=lowerCAmelCase_ , inputs_embeds=lowerCAmelCase_ , ) # sequence_output, pooled_output, (hidden_states), (attentions), highway exits UpperCAmelCase_ : Union[str, Any] = outputs[1] UpperCAmelCase_ : Optional[Any] = self.dropout(lowerCAmelCase_ ) UpperCAmelCase_ : str = self.classifier(lowerCAmelCase_ ) UpperCAmelCase_ : Any = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: UpperCAmelCase_ : Tuple = e.message UpperCAmelCase_ : Optional[Any] = e.exit_layer UpperCAmelCase_ : Optional[int] = outputs[0] if not self.training: UpperCAmelCase_ : Tuple = entropy(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : List[str] = [] if labels is not None: if self.num_labels == 1: # We are doing regression UpperCAmelCase_ : str = MSELoss() UpperCAmelCase_ : str = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: UpperCAmelCase_ : str = CrossEntropyLoss() UpperCAmelCase_ : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits UpperCAmelCase_ : int = [] for highway_exit in outputs[-1]: UpperCAmelCase_ : int = highway_exit[0] if not self.training: highway_logits_all.append(lowerCAmelCase_ ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression UpperCAmelCase_ : str = MSELoss() UpperCAmelCase_ : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: UpperCAmelCase_ : Optional[int] = CrossEntropyLoss() UpperCAmelCase_ : str = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(lowerCAmelCase_ ) if train_highway: UpperCAmelCase_ : List[str] = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of course else: UpperCAmelCase_ : Dict = (loss,) + outputs if not self.training: UpperCAmelCase_ : Dict = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: UpperCAmelCase_ : Optional[Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
268
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class UpperCamelCase_ : # setable values __magic_name__ = None __magic_name__ = None __magic_name__ = None # sigma(t_i) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Optional[Any]: return cls() @dataclass class UpperCamelCase_ (__A ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 class UpperCamelCase_ (__A , __A ): @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: return True @register_to_config def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: return KarrasVeSchedulerState.create() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState: UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy() UpperCAmelCase_ : Dict = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]: if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: UpperCAmelCase_ : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 ) UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape ) UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : str = sample_prev + sigma_prev * 
model_output UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict: raise NotImplementedError()
268
1
"""simple docstring""" from __future__ import annotations from random import random class UpperCamelCase_ : def __init__( self : Optional[Any] , lowerCAmelCase_ : int | None = None ) -> Any: UpperCAmelCase_ : Union[str, Any] = value UpperCAmelCase_ : List[str] = random() UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def __repr__( self : Dict ) -> str: from pprint import pformat if self.left is None and self.right is None: return f"""'{self.value}: {self.prior:.5}'""" else: return pformat( {f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 ) def __str__( self : Optional[int] ) -> str: UpperCAmelCase_ : Optional[int] = str(self.value ) + " " UpperCAmelCase_ : List[Any] = str(self.left or "" ) UpperCAmelCase_ : Optional[int] = str(self.right or "" ) return value + left + right def snake_case ( A__ ,A__ ): if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = split(root.left ,A__ ) return left, root else: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = split(root.right ,A__ ) return root, right def snake_case ( A__ ,A__ ): if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: UpperCAmelCase_ : Tuple = merge(left.right ,A__ ) return left else: UpperCAmelCase_ : int = merge(A__ ,right.left ) return right def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Tuple = Node(A__ ) UpperCAmelCase_ , UpperCAmelCase_ : int = split(A__ ,A__ ) return merge(merge(A__ ,A__ ) ,A__ ) def snake_case ( A__ ,A__ ): UpperCAmelCase_ , UpperCAmelCase_ : List[str] = split(A__ ,value - 1 ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = split(A__ ,A__ ) return merge(A__ ,A__ ) def snake_case ( A__ ): if not root: # None return else: inorder(root.left ) print(root.value ,end="," ) inorder(root.right ) def snake_case ( A__ ,A__ ): for arg in args.split(): if arg[0] == "+": UpperCAmelCase_ : Union[str, Any] = insert(A__ ,int(arg[1:] ) ) elif arg[0] == "-": UpperCAmelCase_ : Optional[Any] = erase(A__ ,int(arg[1:] ) ) else: print("Unknown command" ) return root def snake_case ( ): UpperCAmelCase_ : str = None print( "enter numbers to create a tree, + value to add value into treap, " "- value to erase all nodes with value. 'q' to quit. " ) UpperCAmelCase_ : Union[str, Any] = input() while args != "q": UpperCAmelCase_ : Union[str, Any] = interact_treap(A__ ,A__ ) print(A__ ) UpperCAmelCase_ : List[Any] = input() print("good by!" ) if __name__ == "__main__": import doctest doctest.testmod() main()
268
"""simple docstring""" def snake_case ( A__ ,A__ ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) UpperCAmelCase_ : Dict = (boundary[1] - boundary[0]) / steps UpperCAmelCase_ : Optional[int] = boundary[0] UpperCAmelCase_ : str = boundary[1] UpperCAmelCase_ : Tuple = make_points(A__ ,A__ ,A__ ) UpperCAmelCase_ : List[str] = 0.0 y += (h / 2.0) * f(A__ ) for i in x_i: # print(i) y += h * f(A__ ) y += (h / 2.0) * f(A__ ) return y def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Union[str, Any] = a + h while x < (b - h): yield x UpperCAmelCase_ : Optional[Any] = x + h def snake_case ( A__ ): # enter your function here UpperCAmelCase_ : Dict = (x - 0) * (x - 0) return y def snake_case ( ): UpperCAmelCase_ : Dict = 0.0 # Lower bound of integration UpperCAmelCase_ : Optional[int] = 1.0 # Upper bound of integration UpperCAmelCase_ : Dict = 10.0 # define number of steps or resolution UpperCAmelCase_ : List[Any] = [a, b] # define boundary of integration UpperCAmelCase_ : Union[str, Any] = method_a(A__ ,A__ ) print(F"""y = {y}""" ) if __name__ == "__main__": main()
268
1
"""simple docstring""" import numpy as np def snake_case ( A__ ): return 1 / (1 + np.exp(-vector )) def snake_case ( A__ ): return vector * sigmoid(A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
"""simple docstring""" from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def snake_case ( A__ ,A__ ,A__ ): if not arr: return None, None, 0 if low == high: return low, high, arr[low] UpperCAmelCase_ : Dict = (low + high) // 2 UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = max_subarray(A__ ,A__ ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = max_subarray(A__ ,mid + 1 ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = max_cross_sum(A__ ,A__ ,A__ ,A__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ , UpperCAmelCase_ : str = float("-inf" ), -1 UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = float("-inf" ), -1 UpperCAmelCase_ : int | float = 0 for i in range(A__ ,low - 1 ,-1 ): summ += arr[i] if summ > left_sum: UpperCAmelCase_ : str = summ UpperCAmelCase_ : Any = i UpperCAmelCase_ : Dict = 0 for i in range(mid + 1 ,high + 1 ): summ += arr[i] if summ > right_sum: UpperCAmelCase_ : List[Any] = summ UpperCAmelCase_ : Optional[Any] = i return max_left, max_right, (left_sum + right_sum) def snake_case ( A__ ): UpperCAmelCase_ : str = [randint(1 ,A__ ) for _ in range(A__ )] UpperCAmelCase_ : str = time.time() max_subarray(A__ ,0 ,input_size - 1 ) UpperCAmelCase_ : int = time.time() return end - start def snake_case ( ): UpperCAmelCase_ : int = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00] UpperCAmelCase_ : List[str] = [time_max_subarray(A__ ) for input_size in input_sizes] print("No of Inputs\t\tTime Taken" ) for input_size, runtime in zip(A__ ,A__ ): print(A__ ,"\t\t" ,A__ ) plt.plot(A__ ,A__ ) plt.xlabel("Number of Inputs" ) plt.ylabel("Time taken in seconds" ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
268
1
"""simple docstring""" def snake_case ( A__ ,A__ ): return int((input_a, input_a).count(0 ) == 0 ) def snake_case ( ): assert and_gate(0 ,0 ) == 0 assert and_gate(0 ,1 ) == 0 assert and_gate(1 ,0 ) == 0 assert and_gate(1 ,1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
268
"""simple docstring""" from __future__ import annotations import time lowerCamelCase_ = list[tuple[int, int]] lowerCamelCase_ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class UpperCamelCase_ : def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None ) -> Dict: UpperCAmelCase_ : Any = pos_x UpperCAmelCase_ : str = pos_y UpperCAmelCase_ : int = (pos_y, pos_x) UpperCAmelCase_ : int = goal_x UpperCAmelCase_ : Tuple = goal_y UpperCAmelCase_ : Union[str, Any] = parent class UpperCamelCase_ : def __init__( self : List[Any] , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Tuple: UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [self.start] UpperCAmelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Path | None: while self.node_queue: UpperCAmelCase_ : str = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: UpperCAmelCase_ : Optional[Any] = True return self.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_successors(lowerCAmelCase_ ) for node in successors: self.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> list[Node]: UpperCAmelCase_ : List[str] = [] for action in delta: UpperCAmelCase_ : List[Any] = parent.pos_x + action[1] UpperCAmelCase_ : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , lowerCAmelCase_ ) ) return successors def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node | None ) -> Path: UpperCAmelCase_ : Union[str, Any] = node UpperCAmelCase_ : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Tuple = current_node.parent path.reverse() return path class UpperCamelCase_ : def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Path | None: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: UpperCAmelCase_ : int = self.fwd_bfs.node_queue.pop(0 ) UpperCAmelCase_ : Dict = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: UpperCAmelCase_ : str = True return self.retrace_bidirectional_path( lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = current_bwd_node UpperCAmelCase_ : List[str] = current_fwd_node UpperCAmelCase_ : Tuple = { self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase_ ), self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase_ ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: 
bfs.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Path: UpperCAmelCase_ : Optional[Any] = self.fwd_bfs.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.bwd_bfs.retrace_path(lowerCAmelCase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() lowerCamelCase_ = (0, 0) lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCamelCase_ = time.time() lowerCamelCase_ = BreadthFirstSearch(init, goal) lowerCamelCase_ = bfs.search() lowerCamelCase_ = time.time() - start_bfs_time print('''Unidirectional BFS computation time : ''', bfs_time) lowerCamelCase_ = time.time() lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal) lowerCamelCase_ = bd_bfs.search() lowerCamelCase_ = time.time() - start_bd_bfs_time print('''Bidirectional BFS computation time : ''', bd_bfs_time)
268
1
"""simple docstring""" from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig lowerCamelCase_ = { '''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''', '''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''ernie_m''' __magic_name__ = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self : List[str] , lowerCAmelCase_ : int = 250_002 , lowerCAmelCase_ : int = 768 , lowerCAmelCase_ : int = 12 , lowerCAmelCase_ : int = 12 , lowerCAmelCase_ : int = 3_072 , lowerCAmelCase_ : str = "gelu" , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : float = 0.1 , lowerCAmelCase_ : int = 514 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : float = 1e-05 , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Optional[int]=0.0 , **lowerCAmelCase_ : Tuple , ) -> List[str]: super().__init__(pad_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = vocab_size UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Any = num_hidden_layers UpperCAmelCase_ : List[str] = num_attention_heads UpperCAmelCase_ : int = intermediate_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : int = hidden_dropout_prob UpperCAmelCase_ : Any = attention_probs_dropout_prob UpperCAmelCase_ : int = max_position_embeddings UpperCAmelCase_ : int = initializer_range UpperCAmelCase_ : Dict = layer_norm_eps UpperCAmelCase_ : Tuple = classifier_dropout UpperCAmelCase_ : Union[str, Any] = is_decoder UpperCAmelCase_ : Optional[Any] = act_dropout
268
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCamelCase_ = None lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } lowerCamelCase_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class UpperCamelCase_ (__A ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = MBartTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Any , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( vocab_file=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Tuple = vocab_file UpperCAmelCase_ : str = False if not self.vocab_file else True UpperCAmelCase_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) UpperCAmelCase_ : Tuple = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase_ : int = src_lang if src_lang is not None else "en_XX" UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase_ : Any = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : List[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : List[str] = [self.sep_token_id] UpperCAmelCase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCAmelCase_ : str = src_lang UpperCAmelCase_ : str = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Dict , ) -> BatchEncoding: UpperCAmelCase_ : List[Any] = src_lang UpperCAmelCase_ : Tuple = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> None: UpperCAmelCase_ : int = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : str = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , 
) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return UpperCAmelCase_ : List[str] = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
268
1
"""simple docstring""" from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar lowerCamelCase_ = TypeVar('''T''') class UpperCamelCase_ (Generic[T] ): __magic_name__ = 42 # Cache store of keys __magic_name__ = 42 # References of the keys in cache __magic_name__ = 10 # Maximum capacity of cache def __init__( self : List[str] , lowerCAmelCase_ : int ) -> None: UpperCAmelCase_ : Union[str, Any] = deque() UpperCAmelCase_ : Optional[Any] = set() if not n: UpperCAmelCase_ : Union[str, Any] = sys.maxsize elif n < 0: raise ValueError("n should be an integer greater than 0." ) else: UpperCAmelCase_ : Optional[int] = n def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : T ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: UpperCAmelCase_ : List[str] = self.dq_store.pop() self.key_reference.remove(lowerCAmelCase_ ) else: self.dq_store.remove(lowerCAmelCase_ ) self.dq_store.appendleft(lowerCAmelCase_ ) self.key_reference.add(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> None: for k in self.dq_store: print(lowerCAmelCase_ ) def __repr__( self : int ) -> str: return f"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() lowerCamelCase_ = LRUCache(4) lru_cache.refer('''A''') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('''A''') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
268
"""simple docstring""" from torch import nn def snake_case ( A__ ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"""Unsupported activation function: {act_fn}""" )
268
1
"""simple docstring""" from __future__ import annotations lowerCamelCase_ = list[tuple[int, int]] lowerCamelCase_ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase_ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class UpperCamelCase_ : def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : float , lowerCAmelCase_ : Node | None , ) -> Dict: UpperCAmelCase_ : List[str] = pos_x UpperCAmelCase_ : str = pos_y UpperCAmelCase_ : Any = (pos_y, pos_x) UpperCAmelCase_ : Optional[Any] = goal_x UpperCAmelCase_ : Tuple = goal_y UpperCAmelCase_ : Tuple = g_cost UpperCAmelCase_ : Optional[Any] = parent UpperCAmelCase_ : Optional[int] = self.calculate_heuristic() def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> float: UpperCAmelCase_ : Any = abs(self.pos_x - self.goal_x ) UpperCAmelCase_ : int = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self : List[Any] , lowerCAmelCase_ : Optional[Any] ) -> bool: return self.f_cost < other.f_cost class UpperCamelCase_ : def __init__( self : str , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Any: UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase_ ) UpperCAmelCase_ : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [self.start] UpperCAmelCase_ : list[Node] = [] UpperCAmelCase_ : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Path | None: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() UpperCAmelCase_ : Any = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: UpperCAmelCase_ : Union[str, Any] = True return self.retrace_path(lowerCAmelCase_ ) self.closed_nodes.append(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self.get_successors(lowerCAmelCase_ ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(lowerCAmelCase_ ) else: # retrieve the best current path UpperCAmelCase_ : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase_ ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(lowerCAmelCase_ ) else: self.open_nodes.append(lowerCAmelCase_ ) if not self.reached: return [self.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Node ) -> list[Node]: UpperCAmelCase_ : Tuple = [] for action in delta: UpperCAmelCase_ : List[str] = parent.pos_x + action[1] UpperCAmelCase_ : Dict = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase_ , ) ) return successors def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Node | None ) -> Path: UpperCAmelCase_ : List[str] = node UpperCAmelCase_ : Any = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Tuple = current_node.parent path.reverse() return path if __name__ == "__main__": lowerCamelCase_ = (0, 0) lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: 
print(elem) print('''------''') lowerCamelCase_ = GreedyBestFirst(init, goal) lowerCamelCase_ = greedy_bf.search() if path: for pos_x, pos_y in path: lowerCamelCase_ = 2 for elem in grid: print(elem)
268
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class UpperCamelCase_ : def __init__( self : str ) -> Dict: UpperCAmelCase_ : List[Any] = "" UpperCAmelCase_ : int = "" UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : int = 0 UpperCAmelCase_ : List[Any] = 256 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : List[str] = 0 def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]: UpperCAmelCase_ : Dict = cva.imread(lowerCAmelCase_ , 0 ) UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.img ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) UpperCAmelCase_ : List[Any] = np.sum(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) ): UpperCAmelCase_ : List[Any] = x[i] / self.k self.sk += prk UpperCAmelCase_ : Optional[Any] = (self.L - 1) * self.sk if self.rem != 0: UpperCAmelCase_ : Any = int(last % last ) UpperCAmelCase_ : List[str] = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size ) UpperCAmelCase_ : Dict = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): UpperCAmelCase_ : Any = self.img[j][i] if num != self.last_list[num]: UpperCAmelCase_ : Tuple = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: plt.hist(self.img.ravel() , 256 , [0, 256] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCamelCase_ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') lowerCamelCase_ = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
268
1
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''', # See all umt5 models at https://huggingface.co/models?filter=umt5 } class UpperCamelCase_ (__A ): __magic_name__ = '''umt5''' __magic_name__ = ['''past_key_values'''] def __init__( self : List[Any] , lowerCAmelCase_ : str=250_112 , lowerCAmelCase_ : str=512 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Union[str, Any]=1_024 , lowerCAmelCase_ : Dict=8 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Union[str, Any]=6 , lowerCAmelCase_ : Optional[Any]=32 , lowerCAmelCase_ : str=128 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Union[str, Any]=1e-6 , lowerCAmelCase_ : Tuple=1.0 , lowerCAmelCase_ : List[str]="gated-gelu" , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Optional[Any]="T5Tokenizer" , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : int=0 , **lowerCAmelCase_ : List[Any] , ) -> Optional[Any]: super().__init__( is_encoder_decoder=lowerCAmelCase_ , tokenizer_class=lowerCAmelCase_ , tie_word_embeddings=lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Union[str, Any] = d_model UpperCAmelCase_ : Tuple = d_kv UpperCAmelCase_ : int = d_ff UpperCAmelCase_ : Dict = num_layers UpperCAmelCase_ : int = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : str = num_heads UpperCAmelCase_ : Dict = relative_attention_num_buckets UpperCAmelCase_ : Dict = relative_attention_max_distance UpperCAmelCase_ : int = dropout_rate UpperCAmelCase_ : List[Any] = layer_norm_epsilon UpperCAmelCase_ : List[str] = initializer_factor UpperCAmelCase_ : Tuple = feed_forward_proj UpperCAmelCase_ : List[Any] = use_cache UpperCAmelCase_ : Optional[Any] = self.feed_forward_proj.split("-" ) UpperCAmelCase_ : str = act_info[-1] UpperCAmelCase_ : str = act_info[0] == "gated" if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : Union[str, Any] = "gelu_new" @property def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]: return self.d_model @property def _SCREAMING_SNAKE_CASE ( self : int ) -> Any: return self.num_heads @property def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: return self.num_layers class UpperCamelCase_ (__A ): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def _SCREAMING_SNAKE_CASE ( self : int ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : List[Any] = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase_ : Dict = "past_encoder_sequence + sequence" UpperCAmelCase_ : Union[str, Any] = {0: "batch"} UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase_ : Dict = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase_ : Optional[int] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return 13 @property def _SCREAMING_SNAKE_CASE ( self : str ) -> float: return 5e-4
268
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ : def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str: UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : str = image_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : Tuple = embeddings_size UpperCAmelCase_ : Union[str, Any] = hidden_sizes UpperCAmelCase_ : int = depths UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : str = num_labels UpperCAmelCase_ : str = scope UpperCAmelCase_ : str = len(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.num_labels UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: UpperCAmelCase_ : Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ : Dict = config_and_inputs UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () __magic_name__ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ): UpperCAmelCase_ : str = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase_ : List[Any] = layer_type UpperCAmelCase_ : int = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True check_hidden_states_output(lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ): UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple() def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ): if isinstance(lowerCAmelCase_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ): recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_ : Union[str, Any] = self.default_image_processor UpperCAmelCase_ : int = prepare_img() 
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" ) # forward pass UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
268
1
"""simple docstring""" import inspect import unittest import warnings from transformers import DeiTConfig from transformers.models.auto import get_values from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_MAPPING, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, ) from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class UpperCamelCase_ : def __init__( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str]=13 , lowerCAmelCase_ : str=30 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Any=5 , lowerCAmelCase_ : int=4 , lowerCAmelCase_ : Any=37 , lowerCAmelCase_ : Optional[int]="gelu" , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : str=10 , lowerCAmelCase_ : str=0.0_2 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : List[str]=2 , ) -> Optional[Any]: UpperCAmelCase_ : str = parent UpperCAmelCase_ : str = batch_size UpperCAmelCase_ : Optional[Any] = image_size UpperCAmelCase_ : List[Any] = patch_size UpperCAmelCase_ : Optional[Any] = num_channels UpperCAmelCase_ : str = is_training UpperCAmelCase_ : Tuple = use_labels UpperCAmelCase_ : Optional[int] = hidden_size UpperCAmelCase_ : str = num_hidden_layers UpperCAmelCase_ : Optional[int] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : Tuple = hidden_dropout_prob UpperCAmelCase_ : str = attention_probs_dropout_prob UpperCAmelCase_ : Any = type_sequence_label_size UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : Optional[Any] = scope UpperCAmelCase_ : Optional[Any] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) UpperCAmelCase_ : Optional[Any] = (image_size // patch_size) ** 2 UpperCAmelCase_ : Optional[Any] = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[int] = None if self.use_labels: UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Any = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : str = DeiTModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] ) -> Tuple: UpperCAmelCase_ : int = DeiTForMaskedImageModeling(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : str = model(lowerCAmelCase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : Union[str, Any] = 1 UpperCAmelCase_ : List[str] = DeiTForMaskedImageModeling(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.type_sequence_label_size UpperCAmelCase_ : Dict = DeiTForImageClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : str = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : Any = 1 UpperCAmelCase_ : Dict = DeiTForImageClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : List[str] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: UpperCAmelCase_ : Any = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase_ : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( DeiTModel, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': DeiTModel, '''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher), } if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : Tuple = DeiTModelTester(self ) UpperCAmelCase_ : Tuple = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason="DeiT does not use 
inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: pass def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase_ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()] UpperCAmelCase_ : str = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any]=False ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "DeiTForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: if not self.model_tester.is_training: return UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Union[str, Any] = True for model_class in self.all_model_classes: # DeiTForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(lowerCAmelCase_ ) or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue UpperCAmelCase_ : List[str] = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(**lowerCAmelCase_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase_ : Union[str, Any] = False UpperCAmelCase_ : Union[str, Any] = True for model_class in self.all_model_classes: if model_class in get_values(lowerCAmelCase_ ) or not model_class.supports_gradient_checkpointing: continue # DeiTForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "DeiTForImageClassificationWithTeacher": continue 
UpperCAmelCase_ : List[str] = model_class(lowerCAmelCase_ ) model.gradient_checkpointing_enable() model.to(lowerCAmelCase_ ) model.train() UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : int = model(**lowerCAmelCase_ ).loss loss.backward() def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : str = [ {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float}, {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long}, {"title": "regression", "num_labels": 1, "dtype": torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(lowerCAmelCase_ ), *get_values(lowerCAmelCase_ ), ] or model_class.__name__ == "DeiTForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ): UpperCAmelCase_ : List[Any] = problem_type["title"] UpperCAmelCase_ : List[str] = problem_type["num_labels"] UpperCAmelCase_ : Any = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.train() UpperCAmelCase_ : Optional[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if problem_type["num_labels"] > 1: UpperCAmelCase_ : Union[str, Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] ) UpperCAmelCase_ : Optional[int] = inputs["labels"].to(problem_type["dtype"] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. 
# See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=lowerCAmelCase_ ) as warning_list: UpperCAmelCase_ : Any = model(**lowerCAmelCase_ ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f"""Something is going wrong in the regression problem: intercepted {w.message}""" ) loss.backward() @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = DeiTModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: return ( DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: UpperCAmelCase_ : Any = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to( lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.default_image_processor UpperCAmelCase_ : Optional[int] = prepare_img() UpperCAmelCase_ : Any = image_processor(images=lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Dict = model(**lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : str = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) ) @slow @require_accelerate @require_torch_gpu def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = DeiTModel.from_pretrained( "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" ) UpperCAmelCase_ : Optional[int] = self.default_image_processor UpperCAmelCase_ : Union[str, Any] = prepare_img() UpperCAmelCase_ : int = image_processor(images=lowerCAmelCase_ , return_tensors="pt" ) UpperCAmelCase_ : Optional[int] = inputs.pixel_values.to(lowerCAmelCase_ ) # forward pass to make sure inference works in fp16 with torch.no_grad(): UpperCAmelCase_ : int = model(lowerCAmelCase_ )
268
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class UpperCamelCase_ : __magic_name__ = '''dummy_data''' __magic_name__ = '''datasets''' __magic_name__ = False def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[Version, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[List[Callable]] = None , ) -> Tuple: UpperCAmelCase_ : Optional[int] = 0 UpperCAmelCase_ : int = dataset_name UpperCAmelCase_ : Optional[int] = cache_dir UpperCAmelCase_ : Tuple = use_local_dummy_data UpperCAmelCase_ : int = config # download_callbacks take a single url as input UpperCAmelCase_ : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root UpperCAmelCase_ : Optional[Any] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general UpperCAmelCase_ : Dict = str(lowerCAmelCase_ ) # to be downloaded UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : int = None @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: if self._dummy_file is None: UpperCAmelCase_ : List[str] = self.download_dummy_data() return self._dummy_file @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: UpperCAmelCase_ : int = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) UpperCAmelCase_ : Union[str, Any] = cached_path( lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_ ) return os.path.join(lowerCAmelCase_ , self.dummy_file_name ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: if self._bucket_url is None: UpperCAmelCase_ : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : List[Any] ) -> Optional[int]: if self.load_existing_dummy_data: # dummy data is downloaded and tested UpperCAmelCase_ : Dict = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned UpperCAmelCase_ : Optional[int] = self.dummy_file_name # special case when data_url is a dict if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_ ) elif isinstance(lowerCAmelCase_ , (list, tuple) ): return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_ ) else: return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , *lowerCAmelCase_ : Union[str, Any] ) -> Any: return self.download_and_extract(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any: return self.download_and_extract(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]: return path def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: return {} def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Dict = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): for single_url in single_urls: download_callback(lowerCAmelCase_ ) else: UpperCAmelCase_ : Tuple = single_urls download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : List[str] = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) for x in single_urls] else: UpperCAmelCase_ : Optional[int] = single_urls UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) UpperCAmelCase_ : int = value # make sure that values are unique if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique UpperCAmelCase_ : List[str] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict: UpperCAmelCase_ : str = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one UpperCAmelCase_ : int = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowerCAmelCase_ ) ) for url in data_url ) UpperCAmelCase_ : Union[str, Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): UpperCAmelCase_ : Tuple = [data_url[0]] * len(lowerCAmelCase_ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url 
has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(lowerCAmelCase_ ) return dummy_data_list def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ) -> Optional[int]: for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(lowerCAmelCase_ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: pass def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]: def _iter_archive_members(lowerCAmelCase_ : Dict ): # this preserves the order of the members inside the ZIP archive UpperCAmelCase_ : str = Path(self.dummy_file ).parent UpperCAmelCase_ : Optional[Any] = path.relative_to(lowerCAmelCase_ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: UpperCAmelCase_ : str = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = _iter_archive_members(lowerCAmelCase_ ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(lowerCAmelCase_ ).as_posix(), file_path.open("rb" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> str: if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : str = [paths] for path in paths: if os.path.isfile(lowerCAmelCase_ ): if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_ ): if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(lowerCAmelCase_ ): if filename.startswith((".", "__") ): continue yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
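For illustration, the shard-collapsing trick in the list-handling branch above (the `data.txt-000001-of-00300` case) boils down to one regex check; a minimal standalone sketch with made-up shard names:

import re

shards = [f"data.txt-{i + 1:06d}-of-000300" for i in range(300)]
if all(re.findall(r"[0-9]{3,}-of-[0-9]{3,}", url) for url in shards):
    shards = [shards[0]] * len(shards)  # every shard then points at one and the same dummy file
print(shards[0], len(shards))  # data.txt-000001-of-000300 300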
268
1
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ (__A ): def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = proj_size UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ ) UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size ) UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] ) UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class UpperCamelCase_ (nn.Module ): def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple: super().__init__() UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5 UpperCAmelCase_ : Optional[Any] = config.hidden_size UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) ] ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str: for block in self.blocks: UpperCAmelCase_ : int = block(lowerCAmelCase_ ) return hidden_states
268
"""simple docstring""" lowerCamelCase_ = [ (1000, '''M'''), (900, '''CM'''), (500, '''D'''), (400, '''CD'''), (100, '''C'''), (90, '''XC'''), (50, '''L'''), (40, '''XL'''), (10, '''X'''), (9, '''IX'''), (5, '''V'''), (4, '''IV'''), (1, '''I'''), ] def snake_case ( A__ ): UpperCAmelCase_ : List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00} UpperCAmelCase_ : Optional[Any] = 0 UpperCAmelCase_ : Tuple = 0 while place < len(A__ ): if (place + 1 < len(A__ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def snake_case ( A__ ): UpperCAmelCase_ : Union[str, Any] = [] for arabic, roman in ROMAN: ((UpperCAmelCase_) , (UpperCAmelCase_)) : str = divmod(A__ ,A__ ) result.append(roman * factor ) if number == 0: break return "".join(A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
1
"""simple docstring""" import logging import os from logging import ( CRITICAL, # NOQA DEBUG, # NOQA ERROR, # NOQA FATAL, # NOQA INFO, # NOQA NOTSET, # NOQA WARN, # NOQA WARNING, # NOQA ) from typing import Optional from tqdm import auto as tqdm_lib lowerCamelCase_ = { '''debug''': logging.DEBUG, '''info''': logging.INFO, '''warning''': logging.WARNING, '''error''': logging.ERROR, '''critical''': logging.CRITICAL, } lowerCamelCase_ = logging.WARNING def snake_case ( ): UpperCAmelCase_ : str = os.getenv("DATASETS_VERBOSITY" ,A__ ) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( F"""Unknown option DATASETS_VERBOSITY={env_level_str}, """ F"""has to be one of: { ', '.join(log_levels.keys() ) }""" ) return _default_log_level def snake_case ( ): return __name__.split("." )[0] def snake_case ( ): return logging.getLogger(_get_library_name() ) def snake_case ( ): # Apply our default configuration to the library root logger. UpperCAmelCase_ : List[Any] = _get_library_root_logger() library_root_logger.setLevel(_get_default_logging_level() ) def snake_case ( ): UpperCAmelCase_ : Dict = _get_library_root_logger() library_root_logger.setLevel(logging.NOTSET ) def snake_case ( A__ = None ): if name is None: UpperCAmelCase_ : Optional[Any] = _get_library_name() return logging.getLogger(A__ ) def snake_case ( ): return _get_library_root_logger().getEffectiveLevel() def snake_case ( A__ ): _get_library_root_logger().setLevel(A__ ) def snake_case ( ): return set_verbosity(A__ ) def snake_case ( ): return set_verbosity(A__ ) def snake_case ( ): return set_verbosity(A__ ) def snake_case ( ): return set_verbosity(A__ ) def snake_case ( ): UpperCAmelCase_ : int = False def snake_case ( ): UpperCAmelCase_ : Any = True # Configure the library root logger at the module level (singleton-like) _configure_library_root_logger() class UpperCamelCase_ : def __init__( self : str , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[int] ) -> Any: # pylint: disable=unused-argument UpperCAmelCase_ : List[str] = args[0] if args else None def __iter__( self : List[Any] ) -> int: return iter(self._iterator ) def __getattr__( self : Union[str, Any] , lowerCAmelCase_ : str ) -> Dict: def empty_fn(*lowerCAmelCase_ : Dict , **lowerCAmelCase_ : str ): # pylint: disable=unused-argument return return empty_fn def __enter__( self : Optional[Any] ) -> int: return self def __exit__( self : List[str] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Any ) -> Optional[int]: return lowerCamelCase_ = True class UpperCamelCase_ : def __call__( self : Optional[int] , *lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str=False , **lowerCAmelCase_ : Optional[int] ) -> Any: if _tqdm_active and not disable: return tqdm_lib.tqdm(*lowerCAmelCase_ , **lowerCAmelCase_ ) else: return EmptyTqdm(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int , *lowerCAmelCase_ : int , **lowerCAmelCase_ : Dict ) -> Any: UpperCAmelCase_ : Tuple = None if _tqdm_active: return tqdm_lib.tqdm.set_lock(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: if _tqdm_active: return tqdm_lib.tqdm.get_lock() lowerCamelCase_ = _tqdm_cls() def snake_case ( ): global _tqdm_active return bool(_tqdm_active ) def snake_case ( ): global _tqdm_active UpperCAmelCase_ : Union[str, Any] = True def snake_case ( ): global _tqdm_active UpperCAmelCase_ : Optional[int] = False
268
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ (__A ): def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="None" , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=None , ) -> Dict: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : int = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[Any] = num_choices UpperCAmelCase_ : List[str] = relative_attention UpperCAmelCase_ : List[Any] = position_biased_input UpperCAmelCase_ : Dict = pos_att_type UpperCAmelCase_ : Optional[Any] = scope def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Tuple = None if self.use_input_mask: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Tuple = 
ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = self.get_config() UpperCAmelCase_ : int = 300 return config def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int ) -> List[Any]: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = DebertaModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = DebertaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> str: UpperCAmelCase_ : Optional[int] = self.num_labels 
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[Any]: UpperCAmelCase_ : Dict = DebertaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Any = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = config_and_inputs UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = DebertaModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] 
) -> Union[str, Any]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ (unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: pass @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained("microsoft/deberta-base" ) UpperCAmelCase_ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) UpperCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. UpperCAmelCase_ : Tuple = torch.tensor( [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
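For context, a minimal end-to-end sketch with the same checkpoint the integration test above loads (standard transformers API; nothing assumed beyond the 768 hidden size of deberta-base):

import torch
from transformers import AutoTokenizer, DebertaModel

tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
model = DebertaModel.from_pretrained("microsoft/deberta-base")

inputs = tokenizer("DeBERTa uses disentangled attention.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # torch.Size([1, sequence_length, 768])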
268
1
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ : def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str: UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : str = image_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : Tuple = embeddings_size UpperCAmelCase_ : Union[str, Any] = hidden_sizes UpperCAmelCase_ : int = depths UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : str = num_labels UpperCAmelCase_ : str = scope UpperCAmelCase_ : str = len(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.num_labels UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: UpperCAmelCase_ : Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ : Dict = config_and_inputs UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () __magic_name__ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ): UpperCAmelCase_ : str = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase_ : List[Any] = layer_type UpperCAmelCase_ : int = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True check_hidden_states_output(lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ): UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple() def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ): if isinstance(lowerCAmelCase_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ): recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_ : Union[str, Any] = self.default_image_processor UpperCAmelCase_ : int = prepare_img() 
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" ) # forward pass UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
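The same classification path in user code; a sketch assuming the first entry of the archive list is facebook/regnet-y-040 (the test only references it by index):

import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits
print(model.config.id2label[int(tf.math.argmax(logits, axis=-1)[0])])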
268
"""simple docstring""" import os def snake_case ( ): with open(os.path.dirname(A__ ) + "/grid.txt" ) as f: UpperCAmelCase_ : Any = [] # noqa: E741 for _ in range(20 ): l.append([int(A__ ) for x in f.readline().split()] ) UpperCAmelCase_ : Any = 0 # right for i in range(20 ): for j in range(17 ): UpperCAmelCase_ : Union[str, Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: UpperCAmelCase_ : Any = temp # down for i in range(17 ): for j in range(20 ): UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: UpperCAmelCase_ : Tuple = temp # diagonal 1 for i in range(17 ): for j in range(17 ): UpperCAmelCase_ : str = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: UpperCAmelCase_ : List[str] = temp # diagonal 2 for i in range(17 ): for j in range(3 ,20 ): UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: UpperCAmelCase_ : List[str] = temp return maximum if __name__ == "__main__": print(solution())
268
1
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class UpperCamelCase_ (unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : str = "ZinengTang/tvlt-base" UpperCAmelCase_ : Optional[int] = tempfile.mkdtemp() def _SCREAMING_SNAKE_CASE ( self : str , **lowerCAmelCase_ : List[Any] ) -> List[str]: return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any , **lowerCAmelCase_ : Dict ) -> Dict: return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str: shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: UpperCAmelCase_ : str = self.get_image_processor() UpperCAmelCase_ : str = self.get_feature_extractor() UpperCAmelCase_ : Optional[Any] = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_ : Any = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , lowerCAmelCase_ ) self.assertIsInstance(processor.image_processor , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: UpperCAmelCase_ : Tuple = self.get_image_processor() UpperCAmelCase_ : List[str] = self.get_feature_extractor() UpperCAmelCase_ : List[Any] = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = np.ones([12_000] ) UpperCAmelCase_ : str = feature_extractor(lowerCAmelCase_ , return_tensors="np" ) UpperCAmelCase_ : Union[str, Any] = processor(audio=lowerCAmelCase_ , return_tensors="np" ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: UpperCAmelCase_ : Dict = self.get_image_processor() UpperCAmelCase_ : List[Any] = self.get_feature_extractor() UpperCAmelCase_ : Any = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = np.ones([3, 224, 224] ) UpperCAmelCase_ : Union[str, Any] = image_processor(lowerCAmelCase_ , return_tensors="np" ) UpperCAmelCase_ : int = processor(images=lowerCAmelCase_ , return_tensors="np" ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]: UpperCAmelCase_ : int = self.get_image_processor() UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor() UpperCAmelCase_ : str = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ ) UpperCAmelCase_ : Any = np.ones([12_000] ) UpperCAmelCase_ : List[Any] = np.ones([3, 224, 224] ) UpperCAmelCase_ : List[str] = processor(audio=lowerCAmelCase_ , images=lowerCAmelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] ) # test if it raises when no input is passed with pytest.raises(lowerCAmelCase_ ): processor() def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]: 
UpperCAmelCase_ : Optional[int] = self.get_image_processor() UpperCAmelCase_ : Optional[int] = self.get_feature_extractor() UpperCAmelCase_ : str = TvltProcessor(image_processor=lowerCAmelCase_ , feature_extractor=lowerCAmelCase_ ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="`processor` and `image_processor`+`feature_extractor` model input names do not match" , )
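Condensed into user code, the combined-inputs test above corresponds to roughly this sketch (dummy arrays shaped like the test fixtures):

import numpy as np
from transformers import TvltProcessor

processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
inputs = processor(audio=np.ones(12_000), images=np.ones((3, 224, 224)))
print(sorted(inputs.keys()))  # ['audio_mask', 'audio_values', 'pixel_mask', 'pixel_values']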
268
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def snake_case ( A__ ): UpperCAmelCase_ : Dict = SwinConfig(image_size=1_92 ) if "base" in model_name: UpperCAmelCase_ : Any = 6 UpperCAmelCase_ : Optional[Any] = 1_28 UpperCAmelCase_ : Optional[int] = (2, 2, 18, 2) UpperCAmelCase_ : List[str] = (4, 8, 16, 32) elif "large" in model_name: UpperCAmelCase_ : Dict = 12 UpperCAmelCase_ : int = 1_92 UpperCAmelCase_ : List[Any] = (2, 2, 18, 2) UpperCAmelCase_ : int = (6, 12, 24, 48) else: raise ValueError("Model not supported, only supports base and large variants" ) UpperCAmelCase_ : str = window_size UpperCAmelCase_ : Any = embed_dim UpperCAmelCase_ : int = depths UpperCAmelCase_ : Any = num_heads return config def snake_case ( A__ ): if "encoder.mask_token" in name: UpperCAmelCase_ : str = name.replace("encoder.mask_token" ,"embeddings.mask_token" ) if "encoder.patch_embed.proj" in name: UpperCAmelCase_ : Optional[int] = name.replace("encoder.patch_embed.proj" ,"embeddings.patch_embeddings.projection" ) if "encoder.patch_embed.norm" in name: UpperCAmelCase_ : List[str] = name.replace("encoder.patch_embed.norm" ,"embeddings.norm" ) if "attn.proj" in name: UpperCAmelCase_ : Optional[Any] = name.replace("attn.proj" ,"attention.output.dense" ) if "attn" in name: UpperCAmelCase_ : Any = name.replace("attn" ,"attention.self" ) if "norm1" in name: UpperCAmelCase_ : str = name.replace("norm1" ,"layernorm_before" ) if "norm2" in name: UpperCAmelCase_ : Tuple = name.replace("norm2" ,"layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase_ : List[str] = name.replace("mlp.fc1" ,"intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase_ : str = name.replace("mlp.fc2" ,"output.dense" ) if name == "encoder.norm.weight": UpperCAmelCase_ : List[str] = "layernorm.weight" if name == "encoder.norm.bias": UpperCAmelCase_ : int = "layernorm.bias" if "decoder" in name: pass else: UpperCAmelCase_ : Any = "swin." + name return name def snake_case ( A__ ,A__ ): for key in orig_state_dict.copy().keys(): UpperCAmelCase_ : Tuple = orig_state_dict.pop(A__ ) if "attn_mask" in key: pass elif "qkv" in key: UpperCAmelCase_ : Optional[int] = key.split("." 
) UpperCAmelCase_ : str = int(key_split[2] ) UpperCAmelCase_ : Union[str, Any] = int(key_split[4] ) UpperCAmelCase_ : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase_ : List[Any] = val[:dim, :] UpperCAmelCase_ : str = val[ dim : dim * 2, : ] UpperCAmelCase_ : str = val[-dim:, :] else: UpperCAmelCase_ : List[str] = val[ :dim ] UpperCAmelCase_ : str = val[ dim : dim * 2 ] UpperCAmelCase_ : Optional[Any] = val[ -dim: ] else: UpperCAmelCase_ : Tuple = val return orig_state_dict def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : List[Any] = torch.load(A__ ,map_location="cpu" )["model"] UpperCAmelCase_ : Optional[Any] = get_swin_config(A__ ) UpperCAmelCase_ : List[Any] = SwinForMaskedImageModeling(A__ ) model.eval() UpperCAmelCase_ : str = convert_state_dict(A__ ,A__ ) model.load_state_dict(A__ ) UpperCAmelCase_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : int = ViTImageProcessor(size={"height": 1_92, "width": 1_92} ) UpperCAmelCase_ : Any = Image.open(requests.get(A__ ,stream=A__ ).raw ) UpperCAmelCase_ : Any = image_processor(images=A__ ,return_tensors="pt" ) with torch.no_grad(): UpperCAmelCase_ : List[Any] = model(**A__ ).logits print(outputs.keys() ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A__ ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase_ = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
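Given the argparse block above, a typical invocation would look like the following (the script filename is a placeholder; the defaults already point at the base 192px SimMIM checkpoint):

python convert_swin_simmim_checkpoint.py \
    --model_name swin-base-simmim-window6-192 \
    --checkpoint_path simmim_pretrain__swin_base__img192_window6__100ep.pth \
    --pytorch_dump_folder_path ./swin-simmim-base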
268
1
"""simple docstring""" def snake_case ( A__ ,A__ ,A__ = 0 ,A__ = 0 ): UpperCAmelCase_ : List[Any] = right or len(A__ ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(A__ ,A__ ,left + 1 ,right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
268
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''rwkv''' __magic_name__ = {'''max_position_embeddings''': '''context_length'''} def __init__( self : str , lowerCAmelCase_ : str=50_277 , lowerCAmelCase_ : Optional[int]=1_024 , lowerCAmelCase_ : Optional[int]=4_096 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : List[Any] , ) -> List[str]: UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : List[str] = context_length UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size UpperCAmelCase_ : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size UpperCAmelCase_ : Any = layer_norm_epsilon UpperCAmelCase_ : List[Any] = rescale_every UpperCAmelCase_ : List[str] = use_cache UpperCAmelCase_ : List[str] = bos_token_id UpperCAmelCase_ : Union[str, Any] = eos_token_id super().__init__( tie_word_embeddings=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
268
1
"""simple docstring""" def snake_case ( A__ = 10 ,A__ = 22 ): UpperCAmelCase_ : Dict = range(1 ,A__ ) UpperCAmelCase_ : Optional[int] = range(1 ,A__ ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(f'{solution(10, 22) = }')
268
"""simple docstring""" import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
268
1
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = BertTokenizer __magic_name__ = BertTokenizerFast __magic_name__ = True __magic_name__ = True __magic_name__ = filter_non_english def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running" UpperCAmelCase_ : Any = "unwanted, running" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: if not self.test_rust_tokenizer: return UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = 
rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: UpperCAmelCase_ : Optional[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = BasicTokenizer() UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't." 
UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] UpperCAmelCase_ : Tuple = {} for i, token in enumerate(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = i UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , ) UpperCAmelCase_ : Optional[int] = 
tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False UpperCAmelCase_ : List[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = ["的", "人", "有"] UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". UpperCAmelCase_ : Tuple = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
268
"""simple docstring""" from __future__ import annotations class UpperCamelCase_ : def __init__( self : Any , lowerCAmelCase_ : int ) -> None: UpperCAmelCase_ : Any = data UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def snake_case ( A__ ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def snake_case ( A__ ): return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0 def snake_case ( A__ ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def snake_case ( ): # Main function for testing. UpperCAmelCase_ : List[str] = Node(1 ) UpperCAmelCase_ : Any = Node(2 ) UpperCAmelCase_ : Optional[Any] = Node(3 ) UpperCAmelCase_ : Union[str, Any] = Node(4 ) UpperCAmelCase_ : int = Node(5 ) UpperCAmelCase_ : Optional[int] = Node(6 ) UpperCAmelCase_ : Any = Node(7 ) UpperCAmelCase_ : List[str] = Node(8 ) UpperCAmelCase_ : List[Any] = Node(9 ) print(is_full_binary_tree(A__ ) ) print(depth_of_tree(A__ ) ) print("Tree is: " ) display(A__ ) if __name__ == "__main__": main()
268
1
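# A minimal sketch of the greedy longest-match-first WordPiece lookup that the BERT
# tokenizer tests above exercise. The vocab set and function name are illustrative,
# and this omits details of the library implementation (e.g. the max-word-length
# guard); it is a sketch of the algorithm, not the tested code.
def wordpiece_tokenize(word: str, vocab: set, unk_token: str = "[UNK]") -> list:
    tokens = []
    start = 0
    while start < len(word):
        end = len(word)
        match = None
        while start < end:  # try the longest remaining substring first, then shrink
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece  # non-initial pieces carry the continuation prefix
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return [unk_token]  # an unmatchable span maps the whole word to [UNK]
        tokens.append(match)
        start = end
    return tokens

# wordpiece_tokenize("unwanted", {"un", "##want", "##ed"}) -> ["un", "##want", "##ed"]
# wordpiece_tokenize("unwantedX", {"un", "##want", "##ed"}) -> ["[UNK]"]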
"""simple docstring""" lowerCamelCase_ = { '''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''', '''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''', '''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''', '''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''', '''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''', '''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''', ''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''', '''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''', '''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/''' } # Exclamation mark is not in ITU-R recommendation # fmt: on lowerCamelCase_ = {value: key for key, value in MORSE_CODE_DICT.items()} def snake_case ( A__ ): return " ".join(MORSE_CODE_DICT[char] for char in message.upper() ) def snake_case ( A__ ): return "".join(REVERSE_DICT[char] for char in message.split() ) def snake_case ( ): UpperCAmelCase_ : List[Any] = "Morse code here!" print(A__ ) UpperCAmelCase_ : Optional[int] = encrypt(A__ ) print(A__ ) UpperCAmelCase_ : Optional[int] = decrypt(A__ ) print(A__ ) if __name__ == "__main__": main()
268
"""simple docstring""" def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ): if index == number_of_items: return 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 ) if weights[index] <= max_weight: UpperCAmelCase_ : Union[str, Any] = values[index] + knapsack( A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 ) return max(A__ ,A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
1
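# The recursive knapsack above recomputes overlapping subproblems; below is a minimal
# sketch of the standard bottom-up DP for the same 0/1 knapsack. Names are
# illustrative and the function is an alternative formulation, not code taken from
# the record above.
def knapsack_dp(weights: list, values: list, max_weight: int) -> int:
    # dp[w] = best value achievable with capacity w using the items seen so far
    dp = [0] * (max_weight + 1)
    for item_weight, item_value in zip(weights, values):
        # iterate capacity downwards so each item is used at most once
        for w in range(max_weight, item_weight - 1, -1):
            dp[w] = max(dp[w], dp[w - item_weight] + item_value)
    return dp[max_weight]

# knapsack_dp([1, 3, 4], [6, 10, 12], 5) == 18  (take the items of weight 1 and 4)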
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: lowerCamelCase_ = None lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } lowerCamelCase_ = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } lowerCamelCase_ = '''▁''' class UpperCamelCase_ (__A ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = BarthezTokenizer def __init__( self : List[Any] , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : Any="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : Any="<s>" , lowerCAmelCase_ : Any="<unk>" , lowerCAmelCase_ : Any="<pad>" , lowerCAmelCase_ : int="<mask>" , **lowerCAmelCase_ : Optional[int] , ) -> str: # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase_ : str = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Any = vocab_file UpperCAmelCase_ : int = False if not self.vocab_file else True def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase_ : List[str] = [self.cls_token_id] UpperCAmelCase_ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase_ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase_ : Optional[Any] = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
268
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ (__A ): def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = proj_size UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ ) UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size ) UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] ) UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class UpperCamelCase_ (nn.Module ): def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple: super().__init__() UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5 UpperCAmelCase_ : Optional[Any] = config.hidden_size UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) ] ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str: for block in self.blocks: UpperCAmelCase_ : int = block(lowerCAmelCase_ ) return hidden_states
268
1
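# The BARThez tokenizer above lays out special tokens BART-style: <s> A </s> for a
# single sequence and <s> A </s></s> B </s> for a pair. A minimal sketch of that
# layout; the ids 0 and 2 are illustrative placeholders, real ids come from the
# tokenizer's vocabulary.
def build_bart_style_inputs(ids_a: list, ids_b: list = None, cls_id: int = 0, sep_id: int = 2) -> list:
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    # note the doubled separator between the two segments
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]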
"""simple docstring""" from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def snake_case ( A__ ): if isinstance(A__ ,collections.abc.Iterable ): return x return (x, x) @require_tf class UpperCamelCase_ : def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> int: pass def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: pass def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: pass def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Union[str, Any] ) -> Any: UpperCAmelCase_ : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = TFVisionTextDualEncoderModel(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : str ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase_ , text_model=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=None , **lowerCAmelCase_ : str ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = {"vision_model": vision_model, "text_model": text_model} UpperCAmelCase_ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) 
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : List[str] ) -> Union[str, Any]: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase_ , text_model=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) UpperCAmelCase_ : Any = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = TFVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = after_output[0].numpy() UpperCAmelCase_ : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase_ , 1e-5 ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : int ) -> Union[str, Any]: UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase_ , text_model=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = model( input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_attentions=lowerCAmelCase_ ) UpperCAmelCase_ : str = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase_ : Dict = to_atuple(vision_model.config.image_size ) UpperCAmelCase_ : Optional[Any] = to_atuple(vision_model.config.patch_size ) UpperCAmelCase_ : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCAmelCase_ : Dict = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) UpperCAmelCase_ : Tuple = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float ) -> str: UpperCAmelCase_ : Dict = np.abs((a - b) ).max() self.assertLessEqual(lowerCAmelCase_ , lowerCAmelCase_ , f"""Difference between torch and flax is {diff} (>= {tol}).""" ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: UpperCAmelCase_ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: UpperCAmelCase_ : int = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: 
UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: UpperCAmelCase_ : str = self.prepare_config_and_inputs() self.check_save_load(**lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.get_pretrained_model_and_inputs() UpperCAmelCase_ : str = model_a(**lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = TFVisionTextDualEncoderModel.from_pretrained(lowerCAmelCase_ ) UpperCAmelCase_ : str = model_a(**lowerCAmelCase_ ) UpperCAmelCase_ : str = after_outputs[0].numpy() UpperCAmelCase_ : Optional[int] = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowerCAmelCase_ , 1e-5 ) @require_tf class UpperCamelCase_ (__A , unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: UpperCAmelCase_ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" ) UpperCAmelCase_ : Optional[int] = 13 UpperCAmelCase_ : Optional[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) UpperCAmelCase_ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) UpperCAmelCase_ : Tuple = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : int = TFViTModel(lowerCAmelCase_ , name="vision_model" ) UpperCAmelCase_ : Optional[Any] = TFBertModel(lowerCAmelCase_ , name="text_model" ) return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = TFViTModelTester(self ) UpperCAmelCase_ : List[Any] = TFBertModelTester(self ) UpperCAmelCase_ : Optional[int] = vit_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : Any = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = vision_config_and_inputs ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : List[Any] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class UpperCamelCase_ (__A , unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
UpperCAmelCase_ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" ) UpperCAmelCase_ : Any = 13 UpperCAmelCase_ : List[str] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) UpperCAmelCase_ : Optional[Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) UpperCAmelCase_ : Union[str, Any] = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : Optional[Any] ) -> Union[str, Any]: UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.get_vision_text_model(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=lowerCAmelCase_ , text_model=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model( input_ids=lowerCAmelCase_ , pixel_values=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , output_attentions=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = output.vision_model_output.attentions self.assertEqual(len(lowerCAmelCase_ ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCAmelCase_ : List[Any] = to_atuple(vision_model.config.image_size ) UpperCAmelCase_ : Union[str, Any] = to_atuple(vision_model.config.patch_size ) UpperCAmelCase_ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCAmelCase_ : str = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) UpperCAmelCase_ : List[str] = output.text_model_output.attentions self.assertEqual(len(lowerCAmelCase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : Optional[Any] = TFDeiTModel(lowerCAmelCase_ , name="vision_model" ) UpperCAmelCase_ : str = TFRobertaModel(lowerCAmelCase_ , name="text_model" ) return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : int ) -> str: UpperCAmelCase_ : Dict = TFDeiTModelTester(self ) UpperCAmelCase_ : Optional[Any] = TFRobertaModelTester(self ) UpperCAmelCase_ : Union[str, Any] = vit_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : Tuple = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = vision_config_and_inputs ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class UpperCamelCase_ (__A , unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( 
self : Dict ) -> Any: UpperCAmelCase_ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" ) UpperCAmelCase_ : List[Any] = 13 UpperCAmelCase_ : Dict = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) UpperCAmelCase_ : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) UpperCAmelCase_ : Optional[Any] = random_attention_mask([batch_size, 4] ) UpperCAmelCase_ : Dict = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> Optional[int]: UpperCAmelCase_ : Tuple = TFCLIPVisionModel(lowerCAmelCase_ , name="vision_model" ) UpperCAmelCase_ : Optional[int] = TFBertModel(lowerCAmelCase_ , name="text_model" ) return vision_model, text_model def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : str = TFCLIPVisionModelTester(self ) UpperCAmelCase_ : str = TFBertModelTester(self ) UpperCAmelCase_ : str = clip_model_tester.prepare_config_and_inputs() UpperCAmelCase_ : int = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ : List[str] = vision_config_and_inputs ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[int] = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class UpperCamelCase_ (unittest.TestCase ): @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> int: UpperCAmelCase_ : Any = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) UpperCAmelCase_ : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase_ : Dict = processor( text=["una foto di un gatto", "una foto di un cane"] , images=lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="np" ) UpperCAmelCase_ : Any = model(**lowerCAmelCase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) UpperCAmelCase_ : List[Any] = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowerCAmelCase_ , atol=1e-3 ) )
268
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(__A ) class UpperCamelCase_ (__A ): def __init__( self : int , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int]=None ) -> List[Any]: UpperCAmelCase_ : str = {} if top_k is not None: UpperCAmelCase_ : List[str] = top_k return {}, {}, postprocess_params def __call__( self : str , lowerCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase_ : Any ) -> Tuple: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str ) -> Any: UpperCAmelCase_ : Tuple = load_image(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) return model_inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : Any = self.model(**lowerCAmelCase_ ) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=5 ) -> Any: if top_k > self.model.config.num_labels: UpperCAmelCase_ : int = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ : str = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(lowerCAmelCase_ ) elif self.framework == "tf": UpperCAmelCase_ : str = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase_ : Union[str, Any] = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCAmelCase_ : int = scores.tolist() UpperCAmelCase_ : Optional[Any] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
268
1
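# The image-classification pipeline above ends by turning logits into top-k
# (score, label) pairs. A framework-neutral sketch of that postprocessing step with
# NumPy; `id2label` is an illustrative mapping (the pipeline reads it from the model
# config), and a 1-D logits vector is assumed.
import numpy as np

def top_k_labels(logits: np.ndarray, id2label: dict, top_k: int = 5) -> list:
    top_k = min(top_k, logits.shape[-1])      # clamp k to the number of labels
    probs = np.exp(logits - logits.max())     # numerically stable softmax
    probs /= probs.sum()
    best = np.argsort(probs)[::-1][:top_k]    # indices of the k highest scores
    return [{"score": float(probs[i]), "label": id2label[int(i)]} for i in best]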
"""simple docstring""" def snake_case ( A__ ,A__ ): return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def snake_case ( A__ ,A__=0 ): return sorted(A__ ,key=lambda A__ : x[column] ) def snake_case ( A__ ,A__ ,A__=float("inf" ) ): for i in range(points_counts - 1 ): for j in range(i + 1 ,A__ ): UpperCAmelCase_ : List[Any] = euclidean_distance_sqr(points[i] ,points[j] ) if current_dis < min_dis: UpperCAmelCase_ : Tuple = current_dis return min_dis def snake_case ( A__ ,A__ ,A__=float("inf" ) ): for i in range(min(6 ,points_counts - 1 ) ,A__ ): for j in range(max(0 ,i - 6 ) ,A__ ): UpperCAmelCase_ : str = euclidean_distance_sqr(points[i] ,points[j] ) if current_dis < min_dis: UpperCAmelCase_ : int = current_dis return min_dis def snake_case ( A__ ,A__ ,A__ ): # base case if points_counts <= 3: return dis_between_closest_pair(A__ ,A__ ) # recursion UpperCAmelCase_ : int = points_counts // 2 UpperCAmelCase_ : Optional[int] = closest_pair_of_points_sqr( A__ ,points_sorted_on_y[:mid] ,A__ ) UpperCAmelCase_ : Optional[int] = closest_pair_of_points_sqr( A__ ,points_sorted_on_y[mid:] ,points_counts - mid ) UpperCAmelCase_ : str = min(A__ ,A__ ) UpperCAmelCase_ : str = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(A__ ) UpperCAmelCase_ : Tuple = dis_between_closest_in_strip( A__ ,len(A__ ) ,A__ ) return min(A__ ,A__ ) def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Dict = column_based_sort(A__ ,column=0 ) UpperCAmelCase_ : str = column_based_sort(A__ ,column=1 ) return ( closest_pair_of_points_sqr( A__ ,A__ ,A__ ) ) ** 0.5 if __name__ == "__main__": lowerCamelCase_ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print('''Distance:''', closest_pair_of_points(points, len(points)))
268
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class UpperCamelCase_ (__A ): __magic_name__ = '''detr''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Dict , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=100 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : str="resnet50" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) UpperCAmelCase_ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Dict = backbone_config.get("model_type" ) UpperCAmelCase_ : Dict = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Tuple = config_class.from_dict(lowerCAmelCase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : str = use_timm_backbone UpperCAmelCase_ : Optional[Any] = backbone_config UpperCAmelCase_ : Tuple = num_channels UpperCAmelCase_ : Dict = num_queries UpperCAmelCase_ : str = d_model UpperCAmelCase_ : Any = encoder_ffn_dim UpperCAmelCase_ : Union[str, Any] = encoder_layers UpperCAmelCase_ : Optional[int] = encoder_attention_heads UpperCAmelCase_ : List[str] = decoder_ffn_dim UpperCAmelCase_ : Tuple = decoder_layers UpperCAmelCase_ : Optional[int] = decoder_attention_heads UpperCAmelCase_ : List[Any] = dropout UpperCAmelCase_ : Union[str, Any] = attention_dropout UpperCAmelCase_ : int = activation_dropout UpperCAmelCase_ : List[str] = activation_function UpperCAmelCase_ : Optional[int] = init_std UpperCAmelCase_ : Union[str, Any] = init_xavier_std UpperCAmelCase_ : List[str] = encoder_layerdrop UpperCAmelCase_ : Tuple = decoder_layerdrop UpperCAmelCase_ : str = encoder_layers UpperCAmelCase_ : Any = auxiliary_loss UpperCAmelCase_ : Optional[int] = position_embedding_type UpperCAmelCase_ : List[str] = backbone UpperCAmelCase_ : int = use_pretrained_backbone UpperCAmelCase_ : Any = dilation # Hungarian matcher UpperCAmelCase_ : str = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : List[str] = mask_loss_coefficient UpperCAmelCase_ : Dict = dice_loss_coefficient UpperCAmelCase_ : Any = bbox_loss_coefficient UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient UpperCAmelCase_ : int = eos_coefficient super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.encoder_attention_heads @property def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return self.d_model @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple ) -> List[Any]: return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, any]: UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : Any = self.__class__.model_type return output class UpperCamelCase_ (__A ): __magic_name__ = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float: return 1e-5 @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return 12
268
1
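# A simple way to sanity-check the divide-and-conquer closest-pair routine above is
# the O(n^2) brute force. The sketch below uses the same squared-distance convention
# and illustrative names; both methods should agree on the minimum distance for the
# sample point list shown above.
def closest_pair_brute_force(points: list) -> float:
    best = float("inf")
    for i in range(len(points) - 1):
        for j in range(i + 1, len(points)):
            dx = points[i][0] - points[j][0]
            dy = points[i][1] - points[j][1]
            best = min(best, dx * dx + dy * dy)
    return best ** 0.5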
"""simple docstring""" # DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class UpperCamelCase_ (__A ): __magic_name__ = 42 __magic_name__ = None def snake_case ( A__ ,A__=0.999 ,A__="cosine" ,): if alpha_transform_type == "cosine": def alpha_bar_fn(A__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(A__ ): return math.exp(t * -12.0 ) else: raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) UpperCAmelCase_ : List[str] = [] for i in range(A__ ): UpperCAmelCase_ : List[str] = i / num_diffusion_timesteps UpperCAmelCase_ : Union[str, Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) ,A__ ) ) return torch.tensor(A__ ,dtype=torch.floataa ) class UpperCamelCase_ (__A , __A ): __magic_name__ = 1 @register_to_config def __init__( self : List[Any] , lowerCAmelCase_ : int = 1_000 , lowerCAmelCase_ : float = 0.0_0_0_1 , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : str = "linear" , lowerCAmelCase_ : Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : str = "epsilon" , lowerCAmelCase_ : float = 1.0 , **lowerCAmelCase_ : Optional[int] , ) -> Any: if kwargs.get("set_alpha_to_one" , lowerCAmelCase_ ) is not None: UpperCAmelCase_ : Tuple = ( "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead." ) deprecate("set_alpha_to_one" , "1.0.0" , lowerCAmelCase_ , standard_warn=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = kwargs["set_alpha_to_one"] if trained_betas is not None: UpperCAmelCase_ : List[Any] = torch.tensor(lowerCAmelCase_ , dtype=torch.floataa ) elif beta_schedule == "linear": UpperCAmelCase_ : Any = torch.linspace(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. UpperCAmelCase_ : int = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase_ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule UpperCAmelCase_ : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase_ ) else: raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""" ) UpperCAmelCase_ : List[str] = 1.0 - self.betas UpperCAmelCase_ : Dict = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. 
UpperCAmelCase_ : List[Any] = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution UpperCAmelCase_ : Tuple = 1.0 # setable values UpperCAmelCase_ : str = None UpperCAmelCase_ : List[str] = torch.from_numpy(np.arange(0 , lowerCAmelCase_ ).copy().astype(np.intaa ) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : Optional[int] = None ) -> torch.FloatTensor: return sample def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, torch.device] = None ) -> str: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f"""`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:""" f""" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle""" f""" maximal {self.config.num_train_timesteps} timesteps.""" ) UpperCAmelCase_ : List[Any] = num_inference_steps UpperCAmelCase_ : List[Any] = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 UpperCAmelCase_ : str = (np.arange(0 , lowerCAmelCase_ ) * step_ratio).round().copy().astype(np.intaa ) UpperCAmelCase_ : Dict = torch.from_numpy(lowerCAmelCase_ ).to(lowerCAmelCase_ ) self.timesteps += self.config.steps_offset def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : int , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]: # 1. get previous step value (=t+1) UpperCAmelCase_ : Dict = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process UpperCAmelCase_ : Optional[Any] = self.alphas_cumprod[timestep] UpperCAmelCase_ : Any = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) UpperCAmelCase_ : Tuple = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase_ : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 UpperCAmelCase_ : int = model_output elif self.config.prediction_type == "sample": UpperCAmelCase_ : Tuple = model_output UpperCAmelCase_ : int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": UpperCAmelCase_ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output UpperCAmelCase_ : Union[str, Any] = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or""" " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: UpperCAmelCase_ : Tuple = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. 
compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCAmelCase_ : List[Any] = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf UpperCAmelCase_ : Optional[int] = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=lowerCAmelCase_ , pred_original_sample=lowerCAmelCase_ ) def __len__( self : Optional[Any] ) -> Dict: return self.config.num_train_timesteps
268
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = BertTokenizer __magic_name__ = BertTokenizerFast __magic_name__ = True __magic_name__ = True __magic_name__ = filter_non_english def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running" UpperCAmelCase_ : Any = "unwanted, running" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: if not self.test_rust_tokenizer: return UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = 
rust_tokenizer.encode(lowerCAmelCase_ )
        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        UpperCAmelCase_ : Optional[Any] = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
        UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
        UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )

    def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
        UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
        UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
        UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
        UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
        UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        UpperCAmelCase_ : Tuple = BasicTokenizer()
        UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't."
        UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        UpperCAmelCase_ : Tuple = {}
        for i, token in enumerate(lowerCAmelCase_ ):
            UpperCAmelCase_ : Optional[int] = i
        UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )

    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )
        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
        self.assertTrue(_is_control("\u0005" ) )
        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )

    def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )
        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
        UpperCAmelCase_ : Dict = self.get_tokenizer()
        UpperCAmelCase_ : List[str] = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        # (the loop variable, not an outer name, must be tokenized here)
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
        UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" )
        UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ )
        UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ )
        UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
        UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
                UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus(
                    lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )
                UpperCAmelCase_ : Optional[int] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False
                UpperCAmelCase_ : List[Any] = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )
                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
        UpperCAmelCase_ : List[Any] = ["的", "人", "有"]
        UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase_ : Optional[Any] = True
                UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
                UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
                UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
                UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
                UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
                UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
                self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
                UpperCAmelCase_ : List[Any] = False
                UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
                UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
                UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
                UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
                UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
                UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )
                # it is expected that only the first Chinese character is not preceded by "##".
                UpperCAmelCase_ : Tuple = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
                ]
                self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
                self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
268
1
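For reference, a minimal sketch of the behaviour the BasicTokenizer tests above assert; the import path follows the upstream transformers test file and is an assumption here.

# Sketch only: exercises BasicTokenizer the way the tests above do.
from transformers.models.bert.tokenization_bert import BasicTokenizer

basic = BasicTokenizer(do_lower_case=True, strip_accents=True)
# Lowercases, strips accents, and splits punctuation into its own tokens.
print(basic.tokenize(" \tHäLLo!how \n Are yoU? "))  # ['hallo', '!', 'how', 'are', 'you', '?']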
"""simple docstring""" import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) lowerCamelCase_ = logging.getLogger() def snake_case ( ): UpperCAmelCase_ : Dict = argparse.ArgumentParser() parser.add_argument("-f" ) UpperCAmelCase_ : int = parser.parse_args() return args.f def snake_case ( A__ ): UpperCAmelCase_ : int = {} UpperCAmelCase_ : Optional[int] = os.path.join(A__ ,"all_results.json" ) if os.path.exists(A__ ): with open(A__ ,"r" ) as f: UpperCAmelCase_ : Any = json.load(A__ ) else: raise ValueError(F"""can't find {path}""" ) return results def snake_case ( ): UpperCAmelCase_ : int = torch.cuda.is_available() and torch_device == "cuda" return is_using_cuda and is_apex_available() lowerCamelCase_ = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class UpperCamelCase_ (__A ): @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ) -> Tuple: # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU UpperCAmelCase_ : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase_ : Tuple = os.path.join(cls.tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) UpperCAmelCase_ : Any = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple ) -> Optional[Any]: shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : Optional[int] = f""" {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ : int = get_results(lowerCAmelCase_ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "glue_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: UpperCAmelCase_ : Any = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : Optional[Any] = f""" {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking """.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. 
return run_command(self._launch_args + testargs ) UpperCAmelCase_ : str = get_results(lowerCAmelCase_ ) self.assertLess(result["perplexity"] , 100 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "clm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : Tuple = f""" {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ : Optional[Any] = get_results(lowerCAmelCase_ ) self.assertLess(result["perplexity"] , 42 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "mlm_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple: # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu UpperCAmelCase_ : Dict = 7 if get_gpu_count() > 1 else 2 UpperCAmelCase_ : List[Any] = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : Optional[int] = f""" {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ : List[Any] = get_results(lowerCAmelCase_ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 ) self.assertLess(result["train_loss"] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "ner_no_trainer" ) ) ) @unittest.skip(reason="Fix me @muellerzr" ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: UpperCAmelCase_ : Tuple = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : Optional[Any] = f""" {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ : List[str] = get_results(lowerCAmelCase_ ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. 
self.assertGreaterEqual(result["eval_f1"] , 28 ) self.assertGreaterEqual(result["eval_exact"] , 28 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "qa_no_trainer" ) ) ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: UpperCAmelCase_ : Tuple = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : Dict = f""" {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ : int = get_results(lowerCAmelCase_ ) self.assertGreaterEqual(result["eval_accuracy"] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "swag_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: UpperCAmelCase_ : List[str] = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : List[Any] = f""" {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ : List[Any] = get_results(lowerCAmelCase_ ) self.assertGreaterEqual(result["eval_rouge1"] , 10 ) self.assertGreaterEqual(result["eval_rouge2"] , 2 ) self.assertGreaterEqual(result["eval_rougeL"] , 7 ) self.assertGreaterEqual(result["eval_rougeLsum"] , 7 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "summarization_no_trainer" ) ) ) @slow @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : Optional[int] = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : Optional[Any] = f""" {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ : int = get_results(lowerCAmelCase_ ) self.assertGreaterEqual(result["eval_bleu"] , 30 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "epoch_0" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "translation_no_trainer" ) ) ) @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: UpperCAmelCase_ : str = logging.StreamHandler(sys.stdout ) logger.addHandler(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self.get_auto_remove_tmp_dir() 
UpperCAmelCase_ : Optional[int] = f""" {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch """.split() run_command(self._launch_args + testargs ) UpperCAmelCase_ : Optional[int] = get_results(lowerCAmelCase_ ) self.assertGreaterEqual(result["eval_overall_accuracy"] , 0.1_0 ) @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: UpperCAmelCase_ : Optional[Any] = self.get_auto_remove_tmp_dir() UpperCAmelCase_ : Any = f""" {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 """.split() if is_cuda_and_apex_available(): testargs.append("--fp16" ) run_command(self._launch_args + testargs ) UpperCAmelCase_ : str = get_results(lowerCAmelCase_ ) # The base model scores a 25% self.assertGreaterEqual(result["eval_accuracy"] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "step_1" ) ) ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "image_classification_no_trainer" ) ) )
268
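A hedged sketch of how the harness above drives the example scripts: `_launch_args` is the `accelerate launch` prefix built once per class, and each test appends script-specific flags before calling run_command. The paths below are illustrative, not taken from the record.

# Illustrative composition only; run_command comes from transformers.testing_utils.
launch_args = ["accelerate", "launch", "--config_file", "/tmp/default_config.yml"]
testargs = [
    "examples/pytorch/text-classification/run_glue_no_trainer.py",
    "--model_name_or_path", "distilbert-base-uncased",
    "--output_dir", "/tmp/glue_out",
]
# run_command(launch_args + testargs)  # spawns the training run whose results the asserts inspect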
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''t5''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int: UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[Any] = d_model UpperCAmelCase_ : str = d_kv UpperCAmelCase_ : Any = d_ff UpperCAmelCase_ : int = num_layers UpperCAmelCase_ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : Optional[Any] = num_heads UpperCAmelCase_ : Any = relative_attention_num_buckets UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance UpperCAmelCase_ : Optional[Any] = dropout_rate UpperCAmelCase_ : Tuple = layer_norm_epsilon UpperCAmelCase_ : int = initializer_factor UpperCAmelCase_ : int = feed_forward_proj UpperCAmelCase_ : str = use_cache UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" ) UpperCAmelCase_ : List[Any] = act_info[-1] UpperCAmelCase_ : Optional[int] = act_info[0] == "gated" if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : int = "gelu_new" super().__init__( pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , ) class UpperCamelCase_ (__A ): @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : Any = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence" UpperCAmelCase_ : Union[str, Any] = {0: "batch"} UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" ) return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: return 13
268
1
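A small usage sketch for the config record above; the class and attribute names (T5Config, is_gated_act, dense_act_fn) follow the upstream transformers source and are assumptions here, since the record obfuscates them.

from transformers import T5Config

config = T5Config(feed_forward_proj="gated-gelu")
# The "-"-split logic above yields a gated activation; the backwards-compatibility
# branch then rewrites the dense activation from "gelu" to "gelu_new".
print(config.is_gated_act, config.dense_act_fn)  # True gelu_new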
"""simple docstring""" def snake_case ( A__ ,A__ ): return price * (1 + tax_rate) if __name__ == "__main__": print(f'{price_plus_tax(100, 0.25) = }') print(f'{price_plus_tax(125.50, 0.05) = }')
268
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class UpperCamelCase_ : # setable values __magic_name__ = None __magic_name__ = None __magic_name__ = None # sigma(t_i) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Optional[Any]: return cls() @dataclass class UpperCamelCase_ (__A ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 class UpperCamelCase_ (__A , __A ): @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: return True @register_to_config def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: return KarrasVeSchedulerState.create() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState: UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy() UpperCAmelCase_ : Dict = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]: if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: UpperCAmelCase_ : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 ) UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape ) UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : str = sample_prev + sigma_prev * 
model_output UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict: raise NotImplementedError()
268
1
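A rough usage sketch for the scheduler record above, assuming the upstream diffusers names (FlaxKarrasVeScheduler, create_state, set_timesteps), which the record's obfuscation hides.

from diffusers import FlaxKarrasVeScheduler

scheduler = FlaxKarrasVeScheduler()
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)
# state.schedule now holds the descending sigma(t_i) values computed above.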
"""simple docstring""" import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) lowerCamelCase_ = '''\ Text data. Second line of data.''' lowerCamelCase_ = '''file''' @pytest.fixture(scope="session" ) def snake_case ( A__ ): UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd") UpperCAmelCase_ : str = bytes(A__ ,"utf-8" ) with zstd.open(A__ ,"wb" ) as f: f.write(A__ ) return path @pytest.fixture def snake_case ( A__ ): with open(os.path.join(tmpfs.local_root_dir ,A__ ) ,"w" ) as f: f.write(A__ ) return FILE_PATH @pytest.mark.parametrize("compression_format" ,["gzip", "xz", "zstd"] ) def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : List[str] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path} UpperCAmelCase_ : List[Any] = input_paths[compression_format] UpperCAmelCase_ : int = tmp_path / "cache" UpperCAmelCase_ : Optional[int] = DownloadConfig(cache_dir=A__ ,extract_compressed_file=A__ ) UpperCAmelCase_ : str = cached_path(A__ ,download_config=A__ ) with open(A__ ) as f: UpperCAmelCase_ : Any = f.read() with open(A__ ) as f: UpperCAmelCase_ : List[Any] = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize("default_extracted" ,[True, False] ) @pytest.mark.parametrize("default_cache_dir" ,[True, False] ) def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : Optional[Any] = "custom_cache" UpperCAmelCase_ : str = "custom_extracted_dir" UpperCAmelCase_ : Dict = tmp_path / "custom_extracted_path" if default_extracted: UpperCAmelCase_ : Dict = ("downloads" if default_cache_dir else custom_cache_dir, "extracted") else: monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" ,A__ ) monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" ,str(A__ ) ) UpperCAmelCase_ : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) UpperCAmelCase_ : str = xz_file UpperCAmelCase_ : Optional[Any] = ( DownloadConfig(extract_compressed_file=A__ ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir ,extract_compressed_file=A__ ) ) UpperCAmelCase_ : Tuple = cached_path(A__ ,download_config=A__ ) assert Path(A__ ).parent.parts[-2:] == expected def snake_case ( A__ ): # absolute path UpperCAmelCase_ : Union[str, Any] = str(Path(A__ ).resolve() ) assert cached_path(A__ ) == text_file # relative path UpperCAmelCase_ : Dict = str(Path(A__ ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(A__ ) == text_file def snake_case ( A__ ): # absolute path UpperCAmelCase_ : Dict = str(tmp_path.resolve() / "__missing_file__.txt" ) with pytest.raises(A__ ): cached_path(A__ ) # relative path UpperCAmelCase_ : Any = "./__missing_file__.txt" with pytest.raises(A__ ): cached_path(A__ ) def snake_case ( A__ ): UpperCAmelCase_ : Union[str, Any] = get_from_cache(F"""tmp://{tmpfs_file}""" ) with open(A__ ) as f: UpperCAmelCase_ : Optional[Any] = f.read() assert output_file_content == FILE_CONTENT @patch("datasets.config.HF_DATASETS_OFFLINE" ,A__ ) def snake_case ( ): with pytest.raises(A__ ): cached_path("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" ,A__ ) def snake_case ( A__ ): UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("data" ) / 
"file.html" with pytest.raises(A__ ): http_get("https://huggingface.co" ,temp_file=A__ ) with pytest.raises(A__ ): http_head("https://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" ,A__ ) def snake_case ( A__ ): UpperCAmelCase_ : str = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(A__ ): ftp_get("ftp://huggingface.co" ,temp_file=A__ ) with pytest.raises(A__ ): ftp_head("ftp://huggingface.co" ) @patch("datasets.config.HF_DATASETS_OFFLINE" ,A__ ) def snake_case ( A__ ): UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp("data" ) / "file.html" with pytest.raises(A__ ): fsspec_get("s3://huggingface.co" ,temp_file=A__ ) with pytest.raises(A__ ): fsspec_head("s3://huggingface.co" )
268
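A hedged sketch of the offline-mode behaviour the record above pins down; the URL is illustrative only.

# Sketch: datasets refuses network calls while HF_DATASETS_OFFLINE is patched on.
from unittest.mock import patch

from datasets.utils.file_utils import OfflineModeIsEnabled, cached_path

with patch("datasets.config.HF_DATASETS_OFFLINE", True):
    try:
        cached_path("https://huggingface.co")
    except OfflineModeIsEnabled:
        print("network access blocked while offline mode is enabled")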
"""simple docstring""" def snake_case ( A__ ,A__ ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) UpperCAmelCase_ : Dict = (boundary[1] - boundary[0]) / steps UpperCAmelCase_ : Optional[int] = boundary[0] UpperCAmelCase_ : str = boundary[1] UpperCAmelCase_ : Tuple = make_points(A__ ,A__ ,A__ ) UpperCAmelCase_ : List[str] = 0.0 y += (h / 2.0) * f(A__ ) for i in x_i: # print(i) y += h * f(A__ ) y += (h / 2.0) * f(A__ ) return y def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Union[str, Any] = a + h while x < (b - h): yield x UpperCAmelCase_ : Optional[Any] = x + h def snake_case ( A__ ): # enter your function here UpperCAmelCase_ : Dict = (x - 0) * (x - 0) return y def snake_case ( ): UpperCAmelCase_ : Dict = 0.0 # Lower bound of integration UpperCAmelCase_ : Optional[int] = 1.0 # Upper bound of integration UpperCAmelCase_ : Dict = 10.0 # define number of steps or resolution UpperCAmelCase_ : List[Any] = [a, b] # define boundary of integration UpperCAmelCase_ : Union[str, Any] = method_a(A__ ,A__ ) print(F"""y = {y}""" ) if __name__ == "__main__": main()
268
1
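A quick numeric check of the rule implemented above, written with fresh names since the record's definitions all collide on `snake_case`: integrating f(x) = x**2 over [0, 1] with 10 steps gives 0.335 against the exact value 1/3.

def trapezoid(f, a, b, steps):
    # Composite trapezoidal rule: h * (f(a)/2 + f(x_1) + ... + f(x_{n-1}) + f(b)/2)
    h = (b - a) / steps
    interior = sum(f(a + i * h) for i in range(1, int(steps)))
    return h * ((f(a) + f(b)) / 2 + interior)

print(trapezoid(lambda x: x * x, 0.0, 1.0, 10))  # 0.335..., exact value is 1/3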
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. lowerCamelCase_ = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. lowerCamelCase_ = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. lowerCamelCase_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Optional[Any] = len([g for position, g in enumerate(A__ ) if g == main_target[position]] ) return (item, float(A__ )) def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Tuple = random.randint(0 ,len(A__ ) - 1 ) UpperCAmelCase_ : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:] UpperCAmelCase_ : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def snake_case ( A__ ,A__ ): UpperCAmelCase_ : List[str] = list(A__ ) if random.uniform(0 ,1 ) < MUTATION_PROBABILITY: UpperCAmelCase_ : Tuple = random.choice(A__ ) return "".join(A__ ) def snake_case ( A__ ,A__ ,A__ ,): UpperCAmelCase_ : Optional[int] = [] # Generate more children proportionally to the fitness score. UpperCAmelCase_ : Union[str, Any] = int(parent_a[1] * 1_00 ) + 1 UpperCAmelCase_ : Tuple = 10 if child_n >= 10 else child_n for _ in range(A__ ): UpperCAmelCase_ : Tuple = population_score[random.randint(0 ,A__ )][0] UpperCAmelCase_ , UpperCAmelCase_ : List[str] = crossover(parent_a[0] ,A__ ) # Append new string to the population list. pop.append(mutate(A__ ,A__ ) ) pop.append(mutate(A__ ,A__ ) ) return pop def snake_case ( A__ ,A__ ,A__ = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: UpperCAmelCase_ : List[str] = F"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(A__ ) # Verify that the target contains no genes besides the ones inside genes variable. UpperCAmelCase_ : List[Any] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: UpperCAmelCase_ : str = F"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(A__ ) # Generate random starting population. UpperCAmelCase_ : Optional[int] = [] for _ in range(A__ ): population.append("".join([random.choice(A__ ) for i in range(len(A__ ) )] ) ) # Just some logs to know what the algorithms is doing. UpperCAmelCase_ , UpperCAmelCase_ : List[str] = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(A__ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. UpperCAmelCase_ : Union[str, Any] = [evaluate(A__ ,A__ ) for item in population] # Check if there is a matching evolution. 
UpperCAmelCase_ : Dict = sorted(A__ ,key=lambda A__ : x[1] ,reverse=A__ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( F"""\nGeneration: {generation}""" F"""\nTotal Population:{total_population}""" F"""\nBest score: {population_score[0][1]}""" F"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. UpperCAmelCase_ : Dict = population[: int(N_POPULATION / 3 )] population.clear() population.extend(A__ ) # Normalize population score to be between 0 and 1. UpperCAmelCase_ : Optional[Any] = [ (item, score / len(A__ )) for item, score in population_score ] # This is selection for i in range(A__ ): population.extend(select(population_score[int(A__ )] ,A__ ,A__ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(A__ ) > N_POPULATION: break if __name__ == "__main__": lowerCamelCase_ = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) lowerCamelCase_ = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = basic(target_str, genes_list) print( f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}' )
268
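An illustrative run of the record above; per the code, `basic` returns (generation, total_population, best_string) once the target is matched, and the target and gene set here are assumptions.

genes = list(" ABCDEFGHIJKLMNOPQRSTUVWXYZ")
generation, total_population, best = basic("HELLO WORLD", genes, False)  # debug disabled
print(generation, total_population, best)  # best == "HELLO WORLD" on success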
"""simple docstring""" from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def snake_case ( A__ ,A__ ,A__ ): if not arr: return None, None, 0 if low == high: return low, high, arr[low] UpperCAmelCase_ : Dict = (low + high) // 2 UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = max_subarray(A__ ,A__ ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = max_subarray(A__ ,mid + 1 ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = max_cross_sum(A__ ,A__ ,A__ ,A__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ , UpperCAmelCase_ : str = float("-inf" ), -1 UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = float("-inf" ), -1 UpperCAmelCase_ : int | float = 0 for i in range(A__ ,low - 1 ,-1 ): summ += arr[i] if summ > left_sum: UpperCAmelCase_ : str = summ UpperCAmelCase_ : Any = i UpperCAmelCase_ : Dict = 0 for i in range(mid + 1 ,high + 1 ): summ += arr[i] if summ > right_sum: UpperCAmelCase_ : List[Any] = summ UpperCAmelCase_ : Optional[Any] = i return max_left, max_right, (left_sum + right_sum) def snake_case ( A__ ): UpperCAmelCase_ : str = [randint(1 ,A__ ) for _ in range(A__ )] UpperCAmelCase_ : str = time.time() max_subarray(A__ ,0 ,input_size - 1 ) UpperCAmelCase_ : int = time.time() return end - start def snake_case ( ): UpperCAmelCase_ : int = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00] UpperCAmelCase_ : List[str] = [time_max_subarray(A__ ) for input_size in input_sizes] print("No of Inputs\t\tTime Taken" ) for input_size, runtime in zip(A__ ,A__ ): print(A__ ,"\t\t" ,A__ ) plt.plot(A__ ,A__ ) plt.xlabel("Number of Inputs" ) plt.ylabel("Time taken in seconds" ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
268
1
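An illustrative call of the divide-and-conquer routine above, using the name `max_subarray` that survives in the record's recursive call sites.

arr = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
low, high, best = max_subarray(arr, 0, len(arr) - 1)
print(low, high, best)  # 3 6 6 -> the maximum-sum subarray is [4, -1, 2, 1]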
"""simple docstring""" import itertools import json import os import unittest from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = RobertaTokenizer __magic_name__ = RobertaTokenizerFast __magic_name__ = True __magic_name__ = {'''cls_token''': '''<s>'''} def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase_ : Optional[Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] UpperCAmelCase_ : str = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) ) UpperCAmelCase_ : Union[str, Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] UpperCAmelCase_ : Any = {"unk_token": "<unk>"} UpperCAmelCase_ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowerCAmelCase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowerCAmelCase_ ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , **lowerCAmelCase_ : int ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , **lowerCAmelCase_ : int ) -> Optional[int]: kwargs.update(self.special_tokens_map ) return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = "lower newer" UpperCAmelCase_ : Any = "lower newer" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: UpperCAmelCase_ : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase_ : List[Any] = "lower newer" UpperCAmelCase_ : Union[str, Any] = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] UpperCAmelCase_ : Dict = tokenizer.tokenize(lowerCAmelCase_ ) # , add_prefix_space=True) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = tokens + [tokenizer.unk_token] UpperCAmelCase_ : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: UpperCAmelCase_ : Dict = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase_ ) , [0, 31_414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418" , add_special_tokens=lowerCAmelCase_ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict: UpperCAmelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained("roberta-base" ) UpperCAmelCase_ : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : str = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer.encode( "sequence builders" , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = tokenizer.encode( "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: UpperCAmelCase_ : str = self.get_tokenizer() UpperCAmelCase_ : Optional[int] = "Encode this sequence." UpperCAmelCase_ : List[str] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]] # Testing encoder arguments UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) tokenizer.add_special_tokens({"bos_token": "<s>"} ) UpperCAmelCase_ : Tuple = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # Testing spaces after special tokens UpperCAmelCase_ : List[str] = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ )} ) # mask token has a left space UpperCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = "Encode <mask> sequence" UpperCAmelCase_ : List[Any] = "Encode <mask>sequence" UpperCAmelCase_ : List[str] = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : int = encoded.index(lowerCAmelCase_ ) UpperCAmelCase_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = encoded.index(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: pass def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : int = 
self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = "A, <mask> AllenNLP sentence." UpperCAmelCase_ : Union[str, Any] = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) UpperCAmelCase_ : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) UpperCAmelCase_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( lowerCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowerCAmelCase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ ) UpperCAmelCase_ : str = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase_ ) self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase_ ) self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : List[str] = "hello" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase_ : Optional[int] = f"""{text_of_1_token} {text_of_1_token}""" UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase_ ) + 1, len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , ) UpperCAmelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = tokenizer_r(lowerCAmelCase_ , 
return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase_ ) + 1, len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , ) UpperCAmelCase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase_ ), len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , ) UpperCAmelCase_ : Dict = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (len(lowerCAmelCase_ ), len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , ) UpperCAmelCase_ : Optional[int] = f""" {text}""" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ ) UpperCAmelCase_ : str = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase_ ) + 1, 1 + len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , ) UpperCAmelCase_ : Tuple = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase_ ), 1 + len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , ) UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained( lowerCAmelCase_ , use_fast=lowerCAmelCase_ , add_prefix_space=lowerCAmelCase_ , trim_offsets=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = tokenizer_r(lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase_ )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(lowerCAmelCase_ ), 1 + len(lowerCAmelCase_ ) + 1 + len(lowerCAmelCase_ )) , )
268
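A hedged sketch of the mask-token spacing behaviour the tests above pin down, assuming a network-accessible `roberta-base` checkpoint.

from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base")
ids = tok.encode("A, <mask> AllenNLP sentence.")
print(tok.convert_ids_to_tokens(ids))
# ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>']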
"""simple docstring""" from __future__ import annotations import time lowerCamelCase_ = list[tuple[int, int]] lowerCamelCase_ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class UpperCamelCase_ : def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None ) -> Dict: UpperCAmelCase_ : Any = pos_x UpperCAmelCase_ : str = pos_y UpperCAmelCase_ : int = (pos_y, pos_x) UpperCAmelCase_ : int = goal_x UpperCAmelCase_ : Tuple = goal_y UpperCAmelCase_ : Union[str, Any] = parent class UpperCamelCase_ : def __init__( self : List[Any] , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Tuple: UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [self.start] UpperCAmelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Path | None: while self.node_queue: UpperCAmelCase_ : str = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: UpperCAmelCase_ : Optional[Any] = True return self.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_successors(lowerCAmelCase_ ) for node in successors: self.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> list[Node]: UpperCAmelCase_ : List[str] = [] for action in delta: UpperCAmelCase_ : List[Any] = parent.pos_x + action[1] UpperCAmelCase_ : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , lowerCAmelCase_ ) ) return successors def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node | None ) -> Path: UpperCAmelCase_ : Union[str, Any] = node UpperCAmelCase_ : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Tuple = current_node.parent path.reverse() return path class UpperCamelCase_ : def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Path | None: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: UpperCAmelCase_ : int = self.fwd_bfs.node_queue.pop(0 ) UpperCAmelCase_ : Dict = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: UpperCAmelCase_ : str = True return self.retrace_bidirectional_path( lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = current_bwd_node UpperCAmelCase_ : List[str] = current_fwd_node UpperCAmelCase_ : Tuple = { self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase_ ), self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase_ ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: 
bfs.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Path: UpperCAmelCase_ : Optional[Any] = self.fwd_bfs.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.bwd_bfs.retrace_path(lowerCAmelCase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() lowerCamelCase_ = (0, 0) lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCamelCase_ = time.time() lowerCamelCase_ = BreadthFirstSearch(init, goal) lowerCamelCase_ = bfs.search() lowerCamelCase_ = time.time() - start_bfs_time print('''Unidirectional BFS computation time : ''', bfs_time) lowerCamelCase_ = time.time() lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal) lowerCamelCase_ = bd_bfs.search() lowerCamelCase_ = time.time() - start_bd_bfs_time print('''Bidirectional BFS computation time : ''', bd_bfs_time)
268
1
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''asapp/sew-d-tiny-100k''': '''https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json''', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCamelCase_ (__A ): __magic_name__ = '''sew-d''' def __init__( self : Optional[Any] , lowerCAmelCase_ : List[str]=32 , lowerCAmelCase_ : Dict=768 , lowerCAmelCase_ : List[str]=12 , lowerCAmelCase_ : List[Any]=12 , lowerCAmelCase_ : Tuple=3_072 , lowerCAmelCase_ : Any=2 , lowerCAmelCase_ : Optional[Any]=512 , lowerCAmelCase_ : List[str]=256 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=("p2c", "c2p") , lowerCAmelCase_ : Union[str, Any]="layer_norm" , lowerCAmelCase_ : Union[str, Any]="gelu_python" , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : int=0.0 , lowerCAmelCase_ : Optional[int]=0.1 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Union[str, Any]=1e-7 , lowerCAmelCase_ : List[str]=1e-5 , lowerCAmelCase_ : Dict="group" , lowerCAmelCase_ : Dict="gelu" , lowerCAmelCase_ : Optional[int]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCAmelCase_ : Tuple=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCAmelCase_ : Union[str, Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Optional[Any]=128 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : int=0.0_5 , lowerCAmelCase_ : Any=10 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : Union[str, Any]=10 , lowerCAmelCase_ : Dict=0 , lowerCAmelCase_ : Optional[Any]="mean" , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : List[str]=False , lowerCAmelCase_ : List[str]=256 , lowerCAmelCase_ : Optional[Any]=0 , lowerCAmelCase_ : int=1 , lowerCAmelCase_ : Union[str, Any]=2 , **lowerCAmelCase_ : Optional[int] , ) -> Dict: super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = hidden_size UpperCAmelCase_ : Tuple = feat_extract_norm UpperCAmelCase_ : Optional[Any] = feat_extract_activation UpperCAmelCase_ : str = list(lowerCAmelCase_ ) UpperCAmelCase_ : Any = list(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = list(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = conv_bias UpperCAmelCase_ : List[str] = num_conv_pos_embeddings UpperCAmelCase_ : Tuple = num_conv_pos_embedding_groups UpperCAmelCase_ : Tuple = len(self.conv_dim ) UpperCAmelCase_ : Optional[Any] = num_hidden_layers UpperCAmelCase_ : Any = intermediate_size UpperCAmelCase_ : Tuple = squeeze_factor UpperCAmelCase_ : Optional[Any] = max_position_embeddings UpperCAmelCase_ : Tuple = position_buckets UpperCAmelCase_ : List[str] = share_att_key UpperCAmelCase_ : Dict = relative_attention UpperCAmelCase_ : Union[str, Any] = norm_rel_ebd UpperCAmelCase_ : Dict = list(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = hidden_act UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : Optional[int] = hidden_dropout UpperCAmelCase_ : Union[str, Any] = attention_dropout UpperCAmelCase_ : int = activation_dropout UpperCAmelCase_ : Tuple = feat_proj_dropout UpperCAmelCase_ : Optional[int] = final_dropout UpperCAmelCase_ : Tuple = layer_norm_eps 
UpperCAmelCase_ : str = feature_layer_norm_eps UpperCAmelCase_ : Union[str, Any] = initializer_range UpperCAmelCase_ : Any = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( "Configuration for convolutional layers is incorrect." "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`," f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 UpperCAmelCase_ : Optional[int] = apply_spec_augment UpperCAmelCase_ : Optional[Any] = mask_time_prob UpperCAmelCase_ : int = mask_time_length UpperCAmelCase_ : List[Any] = mask_time_min_masks UpperCAmelCase_ : Optional[Any] = mask_feature_prob UpperCAmelCase_ : List[Any] = mask_feature_length UpperCAmelCase_ : str = mask_feature_min_masks # ctc loss UpperCAmelCase_ : Any = ctc_loss_reduction UpperCAmelCase_ : Dict = ctc_zero_infinity # sequence classification UpperCAmelCase_ : Dict = use_weighted_layer_sum UpperCAmelCase_ : Union[str, Any] = classifier_proj_size @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
268
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCamelCase_ = None lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } lowerCamelCase_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class UpperCamelCase_ (__A ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = MBartTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Any , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( vocab_file=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Tuple = vocab_file UpperCAmelCase_ : str = False if not self.vocab_file else True UpperCAmelCase_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) UpperCAmelCase_ : Tuple = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase_ : int = src_lang if src_lang is not None else "en_XX" UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase_ : Any = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : List[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : List[str] = [self.sep_token_id] UpperCAmelCase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCAmelCase_ : str = src_lang UpperCAmelCase_ : str = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Dict , ) -> BatchEncoding: UpperCAmelCase_ : List[Any] = src_lang UpperCAmelCase_ : Tuple = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> None: UpperCAmelCase_ : int = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : str = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , 
) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return UpperCAmelCase_ : List[str] = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
268
1
"""simple docstring"""

from __future__ import annotations

from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any


@dataclass
class UpperCamelCase_ :
    __magic_name__ = 42
    __magic_name__ = None
    __magic_name__ = None


def snake_case ( ):
    UpperCAmelCase_ : Union[str, Any] = Node(1 )
    UpperCAmelCase_ : List[Any] = Node(2 )
    UpperCAmelCase_ : Tuple = Node(3 )
    UpperCAmelCase_ : Tuple = Node(4 )
    UpperCAmelCase_ : Optional[int] = Node(5 )
    return tree


def snake_case ( A__ ):
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []


def snake_case ( A__ ):
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []


def snake_case ( A__ ):
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []


def snake_case ( A__ ):
    return (max(height(root.left ) ,height(root.right ) ) + 1) if root else 0


def snake_case ( A__ ):
    UpperCAmelCase_ : list[Any] = []
    if root is None:
        return output

    UpperCAmelCase_ : Any = deque([root] )
    while process_queue:
        UpperCAmelCase_ : List[Any] = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output


def snake_case ( A__ ,A__ ):
    UpperCAmelCase_ : list[Any] = []

    def populate_output(A__ ,A__ ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left ,level - 1 )
            populate_output(root.right ,level - 1 )

    populate_output(A__ ,A__ )
    return output


def snake_case ( A__ ,A__ ):
    UpperCAmelCase_ : list[Any] = []

    def populate_output(A__ ,A__ ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right ,level - 1 )
            populate_output(root.left ,level - 1 )

    populate_output(A__ ,A__ )
    return output


def snake_case ( A__ ):
    if root is None:
        return []

    UpperCAmelCase_ : list[Sequence[Node | None]] = []

    UpperCAmelCase_ : Any = 0
    UpperCAmelCase_ : List[str] = height(A__ )

    for h in range(1 ,height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(A__ ,A__ ) )
            UpperCAmelCase_ : Optional[int] = 1
        else:
            output.append(get_nodes_from_right_to_left(A__ ,A__ ) )
            UpperCAmelCase_ : Any = 0
    return output


def snake_case ( ):  # Main function for testing.
    UpperCAmelCase_ : Union[str, Any] = make_tree()
    print(F"""In-order Traversal: {inorder(A__ )}""" )
    print(F"""Pre-order Traversal: {preorder(A__ )}""" )
    print(F"""Post-order Traversal: {postorder(A__ )}""" ,"\n" )
    print(F"""Height of Tree: {height(A__ )}""" ,"\n" )
    print("Complete Level Order Traversal: " )
    print(level_order(A__ ) ,"\n" )
    print("Level-wise order Traversal: " )
    for level in range(1 ,height(A__ ) + 1 ):
        print(F"""Level {level}:""" ,get_nodes_from_left_to_right(A__ ,level=A__ ) )
    print("\nZigZag order Traversal: " )
    print(zigzag(A__ ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
268
"""simple docstring"""

from torch import nn


def snake_case ( A__ ):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F"""Unsupported activation function: {act_fn}""" )
268
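

# Hedged usage sketch: assuming the de-obfuscated signature of the helper above is
# ``get_activation(act_fn)`` (the file maps an activation-name string to a torch.nn module),
# typical use looks like
#
#     act = get_activation("silu")    # -> nn.SiLU()
#     act = get_activation("gelu")    # -> nn.GELU()
#
# and any other name raises ``ValueError``.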
1
"""simple docstring"""

import contextlib
from multiprocessing import Pool, RLock

from tqdm.auto import tqdm

from ..utils import experimental, logging


lowerCamelCase_ = logging.get_logger(__name__)


class UpperCamelCase_ :
    __magic_name__ = None


@experimental
def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ )

    return _map_with_joblib(A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ )


def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ):
    UpperCAmelCase_ : List[Any] = num_proc if num_proc <= len(A__ ) else len(A__ )
    UpperCAmelCase_ : Optional[Any] = []  # We organize the splits ourselve (contiguous splits)
    for index in range(A__ ):
        UpperCAmelCase_ : Union[str, Any] = len(A__ ) // num_proc
        UpperCAmelCase_ : Dict = len(A__ ) % num_proc
        UpperCAmelCase_ : Any = div * index + min(A__ ,A__ )
        UpperCAmelCase_ : int = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )

    if len(A__ ) != sum(len(i[1] ) for i in split_kwds ):
        raise ValueError(
            F"""Error dividing inputs iterable among processes. """
            F"""Total number of objects {len(A__ )}, """
            F"""length: {sum(len(i[1] ) for i in split_kwds )}"""
        )

    logger.info(
        F"""Spawning {num_proc} processes for {len(A__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}"""
    )
    UpperCAmelCase_ , UpperCAmelCase_ : str = None, None
    if not disable_tqdm:
        UpperCAmelCase_ , UpperCAmelCase_ : List[str] = (RLock(),), tqdm.set_lock

    with Pool(A__ ,initargs=A__ ,initializer=A__ ) as pool:
        UpperCAmelCase_ : Any = pool.map(A__ ,A__ )

    logger.info(F"""Finished {num_proc} processes""" )
    UpperCAmelCase_ : Tuple = [obj for proc_res in mapped for obj in proc_res]
    logger.info(F"""Unpacked {len(A__ )} objects""" )

    return mapped


def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name ,n_jobs=A__ ):
        return joblib.Parallel()(
            joblib.delayed(A__ )((function, obj, types, None, True, None) ) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def snake_case ( A__ ):
    UpperCAmelCase_ : Any = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        UpperCAmelCase_ : int = None
268
"""simple docstring"""

import copy
import os

import cva
import numpy as np
from matplotlib import pyplot as plt


class UpperCamelCase_ :
    def __init__( self : str ) -> Dict:
        UpperCAmelCase_ : List[Any] = ""
        UpperCAmelCase_ : int = ""
        UpperCAmelCase_ : Dict = []
        UpperCAmelCase_ : int = 0
        UpperCAmelCase_ : List[Any] = 256
        UpperCAmelCase_ : Dict = 0
        UpperCAmelCase_ : List[Any] = 0
        UpperCAmelCase_ : str = 0
        UpperCAmelCase_ : List[str] = 0

    def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
        UpperCAmelCase_ : Dict = cva.imread(lowerCAmelCase_ , 0 )
        UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.img )
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" )
        UpperCAmelCase_ : List[Any] = np.sum(lowerCAmelCase_ )
        for i in range(len(lowerCAmelCase_ ) ):
            UpperCAmelCase_ : List[Any] = x[i] / self.k
            self.sk += prk
            UpperCAmelCase_ : Optional[Any] = (self.L - 1) * self.sk
            if self.rem != 0:
                UpperCAmelCase_ : Any = int(last % last )
            UpperCAmelCase_ : List[str] = int(last + 1 if self.rem >= 0.5 else last )
            self.last_list.append(lowerCAmelCase_ )
            UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size )
            UpperCAmelCase_ : Dict = self.img[1].size
        for i in range(self.number_of_cols ):
            for j in range(self.number_of_rows ):
                UpperCAmelCase_ : Any = self.img[j][i]
                if num != self.last_list[num]:
                    UpperCAmelCase_ : Tuple = self.last_list[num]
        cva.imwrite("output_data/output.jpg" , self.img )

    def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
        plt.hist(self.img.ravel() , 256 , [0, 256] )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        cva.imshow("Output-Image" , self.img )
        cva.imshow("Input-Image" , self.original_image )
        cva.waitKey(5_000 )
        cva.destroyAllWindows()


if __name__ == "__main__":
    lowerCamelCase_ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''')
    lowerCamelCase_ = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
268
1
"""simple docstring"""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


lowerCamelCase_ = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = ['''GLPNFeatureExtractor''']
    lowerCamelCase_ = ['''GLPNImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = [
        '''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GLPNForDepthEstimation''',
        '''GLPNLayer''',
        '''GLPNModel''',
        '''GLPNPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
268
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ : def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str: UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : str = image_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : Tuple = embeddings_size UpperCAmelCase_ : Union[str, Any] = hidden_sizes UpperCAmelCase_ : int = depths UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : str = num_labels UpperCAmelCase_ : str = scope UpperCAmelCase_ : str = len(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.num_labels UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: UpperCAmelCase_ : Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ : Dict = config_and_inputs UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () __magic_name__ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ): UpperCAmelCase_ : str = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase_ : List[Any] = layer_type UpperCAmelCase_ : int = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True check_hidden_states_output(lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ): UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple() def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ): if isinstance(lowerCAmelCase_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ): recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_ : Union[str, Any] = self.default_image_processor UpperCAmelCase_ : int = prepare_img() 
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" ) # forward pass UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
268
1
"""simple docstring"""

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


lowerCamelCase_ = {
    '''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
    '''tokenization_biogpt''': ['''BioGptTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase_ = [
        '''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''BioGptForCausalLM''',
        '''BioGptForTokenClassification''',
        '''BioGptForSequenceClassification''',
        '''BioGptModel''',
        '''BioGptPreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
268
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class UpperCamelCase_ : __magic_name__ = '''dummy_data''' __magic_name__ = '''datasets''' __magic_name__ = False def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[Version, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[List[Callable]] = None , ) -> Tuple: UpperCAmelCase_ : Optional[int] = 0 UpperCAmelCase_ : int = dataset_name UpperCAmelCase_ : Optional[int] = cache_dir UpperCAmelCase_ : Tuple = use_local_dummy_data UpperCAmelCase_ : int = config # download_callbacks take a single url as input UpperCAmelCase_ : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root UpperCAmelCase_ : Optional[Any] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general UpperCAmelCase_ : Dict = str(lowerCAmelCase_ ) # to be downloaded UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : int = None @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: if self._dummy_file is None: UpperCAmelCase_ : List[str] = self.download_dummy_data() return self._dummy_file @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: UpperCAmelCase_ : int = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) UpperCAmelCase_ : Union[str, Any] = cached_path( lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_ ) return os.path.join(lowerCAmelCase_ , self.dummy_file_name ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: if self._bucket_url is None: UpperCAmelCase_ : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : List[Any] ) -> Optional[int]: if self.load_existing_dummy_data: # dummy data is downloaded and tested UpperCAmelCase_ : Dict = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned UpperCAmelCase_ : Optional[int] = self.dummy_file_name # special case when data_url is a dict if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_ ) elif isinstance(lowerCAmelCase_ , (list, tuple) ): return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_ ) else: return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , *lowerCAmelCase_ : Union[str, Any] ) -> Any: return self.download_and_extract(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any: return self.download_and_extract(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]: return path def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: return {} def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Dict = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): for single_url in single_urls: download_callback(lowerCAmelCase_ ) else: UpperCAmelCase_ : Tuple = single_urls download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : List[str] = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) for x in single_urls] else: UpperCAmelCase_ : Optional[int] = single_urls UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) UpperCAmelCase_ : int = value # make sure that values are unique if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique UpperCAmelCase_ : List[str] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict: UpperCAmelCase_ : str = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one UpperCAmelCase_ : int = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowerCAmelCase_ ) ) for url in data_url ) UpperCAmelCase_ : Union[str, Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): UpperCAmelCase_ : Tuple = [data_url[0]] * len(lowerCAmelCase_ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url 
has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(lowerCAmelCase_ ) return dummy_data_list def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ) -> Optional[int]: for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(lowerCAmelCase_ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: pass def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]: def _iter_archive_members(lowerCAmelCase_ : Dict ): # this preserves the order of the members inside the ZIP archive UpperCAmelCase_ : str = Path(self.dummy_file ).parent UpperCAmelCase_ : Optional[Any] = path.relative_to(lowerCAmelCase_ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: UpperCAmelCase_ : str = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = _iter_archive_members(lowerCAmelCase_ ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(lowerCAmelCase_ ).as_posix(), file_path.open("rb" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> str: if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : str = [paths] for path in paths: if os.path.isfile(lowerCAmelCase_ ): if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_ ): if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(lowerCAmelCase_ ): if filename.startswith((".", "__") ): continue yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
268
1
"""simple docstring""" import os from typing import List, Optional, Union from ...image_processing_utils import BatchFeature from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType from ..auto import AutoTokenizer class UpperCamelCase_ (__A ): __magic_name__ = ['''image_processor''', '''tokenizer'''] __magic_name__ = '''BlipImageProcessor''' __magic_name__ = '''AutoTokenizer''' def __init__( self : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str ) -> str: super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) # add QFormer tokenizer UpperCAmelCase_ : Tuple = qformer_tokenizer def __call__( self : List[Any] , lowerCAmelCase_ : ImageInput = None , lowerCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCAmelCase_ : Union[bool, str, TruncationStrategy] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : int = 0 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : int , ) -> BatchFeature: if images is None and text is None: raise ValueError("You have to specify at least images or text." ) UpperCAmelCase_ : int = BatchFeature() if text is not None: UpperCAmelCase_ : List[str] = self.tokenizer( text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , ) encoding.update(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.qformer_tokenizer( text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Optional[int] = qformer_text_encoding.pop("input_ids" ) UpperCAmelCase_ : Union[str, Any] = qformer_text_encoding.pop("attention_mask" ) if images is not None: UpperCAmelCase_ : Tuple = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ ) encoding.update(lowerCAmelCase_ ) return encoding def _SCREAMING_SNAKE_CASE ( self : int , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[int] ) -> List[str]: return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : str ) -> 
Optional[int]: return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = self.tokenizer.model_input_names UpperCAmelCase_ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : int ) -> Dict: if os.path.isfile(lowerCAmelCase_ ): raise ValueError(f"""Provided path ({save_directory}) should be a directory, not a file""" ) os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) UpperCAmelCase_ : str = os.path.join(lowerCAmelCase_ , "qformer_tokenizer" ) self.qformer_tokenizer.save_pretrained(lowerCAmelCase_ ) return super().save_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] , lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Dict ) -> Dict: UpperCAmelCase_ : Any = AutoTokenizer.from_pretrained(lowerCAmelCase_ , subfolder="qformer_tokenizer" ) UpperCAmelCase_ : Optional[int] = cls._get_arguments_from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) args.append(lowerCAmelCase_ ) return cls(*lowerCAmelCase_ )
268
"""simple docstring"""

lowerCamelCase_ = [
    (1000, '''M'''),
    (900, '''CM'''),
    (500, '''D'''),
    (400, '''CD'''),
    (100, '''C'''),
    (90, '''XC'''),
    (50, '''L'''),
    (40, '''XL'''),
    (10, '''X'''),
    (9, '''IX'''),
    (5, '''V'''),
    (4, '''IV'''),
    (1, '''I'''),
]


def snake_case ( A__ ):
    UpperCAmelCase_ : List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00}
    UpperCAmelCase_ : Optional[Any] = 0
    UpperCAmelCase_ : Tuple = 0
    while place < len(A__ ):
        if (place + 1 < len(A__ )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def snake_case ( A__ ):
    UpperCAmelCase_ : Union[str, Any] = []
    for arabic, roman in ROMAN:
        ((UpperCAmelCase_) , (UpperCAmelCase_)) : str = divmod(A__ ,A__ )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(A__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
268
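

# Hedged usage sketch: assuming the de-obfuscated names of the two converters above are
# ``roman_to_int`` and ``int_to_roman`` (in this record both collapse onto the name ``snake_case``):
#
#     roman_to_int("MCMXCIV")   # 1994
#     int_to_roman(1994)        # "MCMXCIV"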
1
"""simple docstring"""

import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def snake_case ( ):
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(A__ ):
            requests.request("GET" ,"https://huggingface.co" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("GET" ,"https://huggingface.co" ,timeout=1.0 )


@pytest.mark.integration
def snake_case ( ):
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("GET" ,"https://huggingface.co" )


def snake_case ( ):
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(A__ ):
            http_head("https://huggingface.co" )
268
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ (__A ): def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="None" , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=None , ) -> Dict: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : int = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[Any] = num_choices UpperCAmelCase_ : List[str] = relative_attention UpperCAmelCase_ : List[Any] = position_biased_input UpperCAmelCase_ : Dict = pos_att_type UpperCAmelCase_ : Optional[Any] = scope def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Tuple = None if self.use_input_mask: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Tuple = 
ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = self.get_config() UpperCAmelCase_ : int = 300 return config def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int ) -> List[Any]: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = DebertaModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = DebertaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> str: UpperCAmelCase_ : Optional[int] = self.num_labels 
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[Any]: UpperCAmelCase_ : Dict = DebertaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Any = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = config_and_inputs UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = DebertaModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] 
) -> Union[str, Any]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ (unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: pass @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained("microsoft/deberta-base" ) UpperCAmelCase_ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) UpperCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. UpperCAmelCase_ : Tuple = torch.tensor( [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
268
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase_ (metaclass=__A ): __magic_name__ = ['''onnx'''] def __init__( self : Optional[int] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Optional[Any] ) -> List[Any]: requires_backends(self , ["onnx"] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Tuple , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : List[str] ) -> Dict: requires_backends(cls , ["onnx"] ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Tuple ) -> Optional[Any]: requires_backends(cls , ["onnx"] )
268
"""simple docstring""" import os def snake_case ( ): with open(os.path.dirname(A__ ) + "/grid.txt" ) as f: UpperCAmelCase_ : Any = [] # noqa: E741 for _ in range(20 ): l.append([int(A__ ) for x in f.readline().split()] ) UpperCAmelCase_ : Any = 0 # right for i in range(20 ): for j in range(17 ): UpperCAmelCase_ : Union[str, Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: UpperCAmelCase_ : Any = temp # down for i in range(17 ): for j in range(20 ): UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: UpperCAmelCase_ : Tuple = temp # diagonal 1 for i in range(17 ): for j in range(17 ): UpperCAmelCase_ : str = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: UpperCAmelCase_ : List[str] = temp # diagonal 2 for i in range(17 ): for j in range(3 ,20 ): UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: UpperCAmelCase_ : List[str] = temp return maximum if __name__ == "__main__": print(solution())
268
1
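A short note on the grid snippet two rows above: it reads a 20x20 grid from grid.txt and takes the greatest product of four adjacent numbers along rows, columns and both diagonals. The following is a minimal, self-contained sketch of the same idea; the 4x4 grid, the name greatest_product and the run length are made up for illustration and are not part of any dataset row.

# Illustrative sketch: greatest product of `run` adjacent numbers in a square grid,
# scanning right, down, diagonal down-right and diagonal down-left.
GRID = [
    [8, 2, 22, 97],
    [49, 49, 99, 40],
    [81, 49, 31, 73],
    [52, 70, 95, 23],
]


def greatest_product(grid, run=4):
    size = len(grid)
    best = 0
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(size):
            for j in range(size):
                # only evaluate runs that stay inside the grid
                end_i, end_j = i + di * (run - 1), j + dj * (run - 1)
                if 0 <= end_i < size and 0 <= end_j < size:
                    product = 1
                    for step in range(run):
                        product *= grid[i + di * step][j + dj * step]
                    best = max(best, product)
    return best


if __name__ == "__main__":
    print(greatest_product(GRID))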
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ = { '''configuration_informer''': [ '''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InformerForPrediction''', '''InformerModel''', '''InformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_informer import ( INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, InformerForPrediction, InformerModel, InformerPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
268
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def snake_case ( A__ ): UpperCAmelCase_ : Dict = SwinConfig(image_size=1_92 ) if "base" in model_name: UpperCAmelCase_ : Any = 6 UpperCAmelCase_ : Optional[Any] = 1_28 UpperCAmelCase_ : Optional[int] = (2, 2, 18, 2) UpperCAmelCase_ : List[str] = (4, 8, 16, 32) elif "large" in model_name: UpperCAmelCase_ : Dict = 12 UpperCAmelCase_ : int = 1_92 UpperCAmelCase_ : List[Any] = (2, 2, 18, 2) UpperCAmelCase_ : int = (6, 12, 24, 48) else: raise ValueError("Model not supported, only supports base and large variants" ) UpperCAmelCase_ : str = window_size UpperCAmelCase_ : Any = embed_dim UpperCAmelCase_ : int = depths UpperCAmelCase_ : Any = num_heads return config def snake_case ( A__ ): if "encoder.mask_token" in name: UpperCAmelCase_ : str = name.replace("encoder.mask_token" ,"embeddings.mask_token" ) if "encoder.patch_embed.proj" in name: UpperCAmelCase_ : Optional[int] = name.replace("encoder.patch_embed.proj" ,"embeddings.patch_embeddings.projection" ) if "encoder.patch_embed.norm" in name: UpperCAmelCase_ : List[str] = name.replace("encoder.patch_embed.norm" ,"embeddings.norm" ) if "attn.proj" in name: UpperCAmelCase_ : Optional[Any] = name.replace("attn.proj" ,"attention.output.dense" ) if "attn" in name: UpperCAmelCase_ : Any = name.replace("attn" ,"attention.self" ) if "norm1" in name: UpperCAmelCase_ : str = name.replace("norm1" ,"layernorm_before" ) if "norm2" in name: UpperCAmelCase_ : Tuple = name.replace("norm2" ,"layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase_ : List[str] = name.replace("mlp.fc1" ,"intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase_ : str = name.replace("mlp.fc2" ,"output.dense" ) if name == "encoder.norm.weight": UpperCAmelCase_ : List[str] = "layernorm.weight" if name == "encoder.norm.bias": UpperCAmelCase_ : int = "layernorm.bias" if "decoder" in name: pass else: UpperCAmelCase_ : Any = "swin." + name return name def snake_case ( A__ ,A__ ): for key in orig_state_dict.copy().keys(): UpperCAmelCase_ : Tuple = orig_state_dict.pop(A__ ) if "attn_mask" in key: pass elif "qkv" in key: UpperCAmelCase_ : Optional[int] = key.split("." 
) UpperCAmelCase_ : str = int(key_split[2] ) UpperCAmelCase_ : Union[str, Any] = int(key_split[4] ) UpperCAmelCase_ : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase_ : List[Any] = val[:dim, :] UpperCAmelCase_ : str = val[ dim : dim * 2, : ] UpperCAmelCase_ : str = val[-dim:, :] else: UpperCAmelCase_ : List[str] = val[ :dim ] UpperCAmelCase_ : str = val[ dim : dim * 2 ] UpperCAmelCase_ : Optional[Any] = val[ -dim: ] else: UpperCAmelCase_ : Tuple = val return orig_state_dict def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : List[Any] = torch.load(A__ ,map_location="cpu" )["model"] UpperCAmelCase_ : Optional[Any] = get_swin_config(A__ ) UpperCAmelCase_ : List[Any] = SwinForMaskedImageModeling(A__ ) model.eval() UpperCAmelCase_ : str = convert_state_dict(A__ ,A__ ) model.load_state_dict(A__ ) UpperCAmelCase_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : int = ViTImageProcessor(size={"height": 1_92, "width": 1_92} ) UpperCAmelCase_ : Any = Image.open(requests.get(A__ ,stream=A__ ).raw ) UpperCAmelCase_ : Any = image_processor(images=A__ ,return_tensors="pt" ) with torch.no_grad(): UpperCAmelCase_ : List[Any] = model(**A__ ).logits print(outputs.keys() ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A__ ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase_ = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
268
1
"""simple docstring""" from manim import * class UpperCamelCase_ (__A ): def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: UpperCAmelCase_ : Any = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase_ : List[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) UpperCAmelCase_ : List[Any] = Rectangle(height=0.2_5 , width=0.2_5 ) UpperCAmelCase_ : Tuple = [mem.copy() for i in range(6 )] UpperCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(6 )] UpperCAmelCase_ : List[str] = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 ) UpperCAmelCase_ : int = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 ) UpperCAmelCase_ : int = VGroup(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 ) UpperCAmelCase_ : List[Any] = Text("CPU" , font_size=24 ) UpperCAmelCase_ : List[str] = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [mem.copy() for i in range(4 )] UpperCAmelCase_ : Tuple = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 ) UpperCAmelCase_ : List[Any] = Text("GPU" , font_size=24 ) UpperCAmelCase_ : Optional[Any] = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ ) gpu.move_to([-1, -1, 0] ) self.add(lowerCAmelCase_ ) UpperCAmelCase_ : Any = [mem.copy() for i in range(6 )] UpperCAmelCase_ : int = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 ) UpperCAmelCase_ : Tuple = Text("Model" , font_size=24 ) UpperCAmelCase_ : Optional[Any] = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ ) model.move_to([3, -1.0, 0] ) self.add(lowerCAmelCase_ ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : int = [] for i, rect in enumerate(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = fill.copy().set_fill(lowerCAmelCase_ , opacity=0.8 ) target.move_to(lowerCAmelCase_ ) model_arr.append(lowerCAmelCase_ ) UpperCAmelCase_ : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase_ , opacity=0.8 ) cpu_target.move_to(cpu_left_col_base[i] ) model_cpu_arr.append(lowerCAmelCase_ ) self.add(*lowerCAmelCase_ , *lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = [meta_mem.copy() for i in range(6 )] UpperCAmelCase_ : Any = [meta_mem.copy() for i in range(6 )] UpperCAmelCase_ : Dict = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 ) UpperCAmelCase_ : Optional[int] = VGroup(*lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 ) UpperCAmelCase_ : Dict = VGroup(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0 ) UpperCAmelCase_ : Optional[int] = Text("Disk" , font_size=24 ) UpperCAmelCase_ : Union[str, Any] = Group(lowerCAmelCase_ , lowerCAmelCase_ ).arrange(lowerCAmelCase_ , buff=0.5 , aligned_edge=lowerCAmelCase_ ) disk.move_to([-4, -1.2_5, 0] ) self.add(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase_ : Dict = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : int = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(lowerCAmelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = 
MarkupText( f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCAmelCase_ ) ) UpperCAmelCase_ : str = Square(0.3 ) input.set_fill(lowerCAmelCase_ , opacity=1.0 ) input.set_stroke(width=0.0 ) input.next_to(model_base[0] , lowerCAmelCase_ , buff=0.5 ) self.play(Write(lowerCAmelCase_ ) ) input.generate_target() input.target.next_to(model_arr[0] , direction=lowerCAmelCase_ , buff=0.0_2 ) self.play(MoveToTarget(lowerCAmelCase_ ) ) self.play(FadeOut(lowerCAmelCase_ ) ) UpperCAmelCase_ : Any = Arrow(start=lowerCAmelCase_ , end=lowerCAmelCase_ , color=lowerCAmelCase_ , buff=0.5 ) a.next_to(model_arr[0].get_left() , lowerCAmelCase_ , buff=0.2 ) model_cpu_arr[0].generate_target() model_cpu_arr[0].target.move_to(gpu_rect[0] ) UpperCAmelCase_ : str = MarkupText( f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCAmelCase_ , run_time=3 ) ) UpperCAmelCase_ : Optional[int] = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2} self.play( Write(lowerCAmelCase_ ) , Circumscribe(model_arr[0] , color=lowerCAmelCase_ , **lowerCAmelCase_ ) , Circumscribe(model_cpu_arr[0] , color=lowerCAmelCase_ , **lowerCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase_ , **lowerCAmelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[0] ) ) UpperCAmelCase_ : int = a.copy() for i in range(6 ): a_c.next_to(model_arr[i].get_right() + 0.0_2 , lowerCAmelCase_ , buff=0.2 ) input.generate_target() input.target.move_to(model_arr[i].get_right() + 0.0_2 ) UpperCAmelCase_ : List[str] = AnimationGroup( FadeOut(lowerCAmelCase_ , run_time=0.5 ) , MoveToTarget(lowerCAmelCase_ , run_time=0.5 ) , FadeIn(lowerCAmelCase_ , run_time=0.5 ) , lag_ratio=0.2 ) self.play(lowerCAmelCase_ ) model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[i] ) if i < 5: model_cpu_arr[i + 1].generate_target() model_cpu_arr[i + 1].target.move_to(gpu_rect[0] ) if i >= 1: UpperCAmelCase_ : Dict = 0.7 self.play( Circumscribe(model_arr[i] , **lowerCAmelCase_ ) , Circumscribe(cpu_left_col_base[i] , **lowerCAmelCase_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCAmelCase_ , **lowerCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase_ , **lowerCAmelCase_ ) , Circumscribe(model_arr[i + 1] , color=lowerCAmelCase_ , **lowerCAmelCase_ ) , ) if i < 1: self.play( MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , ) else: self.play( MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , ) else: model_cpu_arr[i].generate_target() model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] ) input.generate_target() input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 ) self.play( Circumscribe(model_arr[-1] , color=lowerCAmelCase_ , **lowerCAmelCase_ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCAmelCase_ , **lowerCAmelCase_ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase_ , **lowerCAmelCase_ ) , ) self.play(MoveToTarget(model_cpu_arr[i] ) ) UpperCAmelCase_ : List[str] = a_c UpperCAmelCase_ : Tuple = a_c.copy() input.generate_target() input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 ) self.play( FadeOut(lowerCAmelCase_ ) , FadeOut(lowerCAmelCase_ , run_time=0.5 ) , ) UpperCAmelCase_ : str = MarkupText(f"""Inference on a model too large for GPU memory\nis 
successfully completed.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCAmelCase_ , run_time=3 ) , MoveToTarget(lowerCAmelCase_ ) ) self.wait()
268
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''rwkv''' __magic_name__ = {'''max_position_embeddings''': '''context_length'''} def __init__( self : str , lowerCAmelCase_ : str=50_277 , lowerCAmelCase_ : Optional[int]=1_024 , lowerCAmelCase_ : Optional[int]=4_096 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : List[Any] , ) -> List[str]: UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : List[str] = context_length UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size UpperCAmelCase_ : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size UpperCAmelCase_ : Any = layer_norm_epsilon UpperCAmelCase_ : List[Any] = rescale_every UpperCAmelCase_ : List[str] = use_cache UpperCAmelCase_ : List[str] = bos_token_id UpperCAmelCase_ : Union[str, Any] = eos_token_id super().__init__( tie_word_embeddings=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
268
1
"""simple docstring""" def snake_case ( A__ ): if len(A__ ) <= 1: return [tuple(A__ )] UpperCAmelCase_ : Dict = [] def generate(A__ ,A__ ): if k == 1: res.append(tuple(arr[:] ) ) return generate(k - 1 ,A__ ) for i in range(k - 1 ): if k % 2 == 0: # k is even UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = arr[k - 1], arr[i] else: # k is odd UpperCAmelCase_ , UpperCAmelCase_ : Tuple = arr[k - 1], arr[0] generate(k - 1 ,A__ ) generate(len(A__ ) ,A__ ) return res if __name__ == "__main__": lowerCamelCase_ = input('''Enter numbers separated by a comma:\n''').strip() lowerCamelCase_ = [int(item) for item in user_input.split(''',''')] print(heaps(arr))
268
"""simple docstring""" import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
268
1
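The permutation snippet two rows above is a variant of Heap's algorithm, which produces every permutation of a sequence while changing only one swap between successive outputs. Below is a small, self-contained sketch of that algorithm with a usage example; heap_permutations and the sample input are names chosen here for illustration, not identifiers from the row above.

# Illustrative sketch of Heap's algorithm for generating permutations.
def heap_permutations(items):
    items = list(items)
    result = []

    def generate(k):
        if k == 1:
            result.append(tuple(items))
            return
        generate(k - 1)
        for i in range(k - 1):
            # even k: swap position i with the last of the prefix,
            # odd k: always swap the first position with the last of the prefix
            if k % 2 == 0:
                items[i], items[k - 1] = items[k - 1], items[i]
            else:
                items[0], items[k - 1] = items[k - 1], items[0]
            generate(k - 1)

    generate(len(items))
    return result


if __name__ == "__main__":
    for perm in heap_permutations([1, 2, 3]):
        print(perm)  # prints all six permutations of (1, 2, 3)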
"""simple docstring""" import json import os import re import sys import urllib.request import requests from bsa import BeautifulSoup lowerCamelCase_ = { '''User-Agent''': '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36''' ''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''' } def snake_case ( A__ = "dhaka" ,A__ = 5 ): UpperCAmelCase_ : Union[str, Any] = min(A__ ,50 ) # Prevent abuse! UpperCAmelCase_ : Tuple = { "q": query, "tbm": "isch", "hl": "en", "ijn": "0", } UpperCAmelCase_ : List[str] = requests.get("https://www.google.com/search" ,params=A__ ,headers=A__ ) UpperCAmelCase_ : Tuple = BeautifulSoup(html.text ,"html.parser" ) UpperCAmelCase_ : str = "".join( re.findall(r"AF_initDataCallback\(([^<]+)\);" ,str(soup.select("script" ) ) ) ) UpperCAmelCase_ : Any = json.dumps(A__ ) UpperCAmelCase_ : List[Any] = json.loads(A__ ) UpperCAmelCase_ : Any = re.findall( r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," ,A__ ,) if not matched_google_image_data: return 0 UpperCAmelCase_ : Optional[int] = re.sub( r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" ,"" ,str(A__ ) ,) UpperCAmelCase_ : Tuple = re.findall( r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" ,A__ ,) for index, fixed_full_res_image in enumerate(A__ ): if index >= max_images: return index UpperCAmelCase_ : List[str] = bytes(A__ ,"ascii" ).decode( "unicode-escape" ) UpperCAmelCase_ : List[str] = bytes(A__ ,"ascii" ).decode( "unicode-escape" ) UpperCAmelCase_ : List[str] = urllib.request.build_opener() UpperCAmelCase_ : Dict = [ ( "User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36" " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582", ) ] urllib.request.install_opener(A__ ) UpperCAmelCase_ : int = F"""query_{query.replace(' ' ,'_' )}""" if not os.path.exists(A__ ): os.makedirs(A__ ) urllib.request.urlretrieve( # noqa: S310 A__ ,F"""{path_name}/original_size_img_{index}.jpg""" ) return index if __name__ == "__main__": try: lowerCamelCase_ = download_images_from_google_query(sys.argv[1]) print(f'{image_count} images were downloaded to disk.') except IndexError: print('''Please provide a search term.''') raise
268
"""simple docstring""" from __future__ import annotations class UpperCamelCase_ : def __init__( self : Any , lowerCAmelCase_ : int ) -> None: UpperCAmelCase_ : Any = data UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def snake_case ( A__ ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def snake_case ( A__ ): return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0 def snake_case ( A__ ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def snake_case ( ): # Main function for testing. UpperCAmelCase_ : List[str] = Node(1 ) UpperCAmelCase_ : Any = Node(2 ) UpperCAmelCase_ : Optional[Any] = Node(3 ) UpperCAmelCase_ : Union[str, Any] = Node(4 ) UpperCAmelCase_ : int = Node(5 ) UpperCAmelCase_ : Optional[int] = Node(6 ) UpperCAmelCase_ : Any = Node(7 ) UpperCAmelCase_ : List[str] = Node(8 ) UpperCAmelCase_ : List[Any] = Node(9 ) print(is_full_binary_tree(A__ ) ) print(depth_of_tree(A__ ) ) print("Tree is: " ) display(A__ ) if __name__ == "__main__": main()
268
1
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Any = WavaVecaForSequenceClassification.from_pretrained(A__ ,config=A__ ) UpperCAmelCase_ : str = downstream_dict["projector.weight"] UpperCAmelCase_ : Dict = downstream_dict["projector.bias"] UpperCAmelCase_ : str = downstream_dict["model.post_net.linear.weight"] UpperCAmelCase_ : List[Any] = downstream_dict["model.post_net.linear.bias"] return model def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Optional[Any] = WavaVecaForAudioFrameClassification.from_pretrained(A__ ,config=A__ ) UpperCAmelCase_ : Dict = downstream_dict["model.linear.weight"] UpperCAmelCase_ : Optional[Any] = downstream_dict["model.linear.bias"] return model def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : List[Any] = WavaVecaForXVector.from_pretrained(A__ ,config=A__ ) UpperCAmelCase_ : Tuple = downstream_dict["connector.weight"] UpperCAmelCase_ : Dict = downstream_dict["connector.bias"] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): UpperCAmelCase_ : Union[str, Any] = downstream_dict[ F"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] UpperCAmelCase_ : Tuple = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] UpperCAmelCase_ : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"] UpperCAmelCase_ : Any = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"] UpperCAmelCase_ : Tuple = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"] UpperCAmelCase_ : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"] UpperCAmelCase_ : List[Any] = downstream_dict["objective.W"] return model @torch.no_grad() def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : List[str] = torch.load(A__ ,map_location="cpu" ) UpperCAmelCase_ : Tuple = checkpoint["Downstream"] UpperCAmelCase_ : Tuple = WavaVecaConfig.from_pretrained(A__ ) UpperCAmelCase_ : str = WavaVecaFeatureExtractor.from_pretrained( A__ ,return_attention_mask=A__ ,do_normalize=A__ ) UpperCAmelCase_ : Union[str, Any] = hf_config.architectures[0] if arch.endswith("ForSequenceClassification" ): UpperCAmelCase_ : int = convert_classification(A__ ,A__ ,A__ ) elif arch.endswith("ForAudioFrameClassification" ): UpperCAmelCase_ : int = convert_diarization(A__ ,A__ ,A__ ) elif arch.endswith("ForXVector" ): UpperCAmelCase_ : Dict = convert_xvector(A__ ,A__ ,A__ ) else: raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: UpperCAmelCase_ : Tuple = checkpoint["Featurizer"]["weights"] hf_feature_extractor.save_pretrained(A__ ) hf_model.save_pretrained(A__ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') lowerCamelCase_ = 
parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
268
"""simple docstring""" def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ): if index == number_of_items: return 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 ) if weights[index] <= max_weight: UpperCAmelCase_ : Union[str, Any] = values[index] + knapsack( A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 ) return max(A__ ,A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
1
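The recursion two rows above is the classic 0/1 knapsack: at each index the item is either skipped or, when it still fits, taken against the remaining capacity. A self-contained sketch follows; since the row's signature is obfuscated, the parameter order, the function name knapsack as written here and the sample values/weights are assumptions made purely for illustration.

# Illustrative sketch of the recursive 0/1 knapsack.
def knapsack(values, weights, number_of_items, max_weight, index=0):
    if index == number_of_items:
        return 0
    # option 1: skip the current item
    best_without = knapsack(values, weights, number_of_items, max_weight, index + 1)
    best_with = 0
    # option 2: take the current item if it still fits
    if weights[index] <= max_weight:
        best_with = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1
        )
    return max(best_without, best_with)


if __name__ == "__main__":
    values = [60, 100, 120]
    weights = [10, 20, 30]
    print(knapsack(values, weights, len(values), 50))  # 220: take the 20 kg and 30 kg items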
"""simple docstring""" def snake_case ( A__ ): if not numbers: return 0 if not isinstance(A__ ,(list, tuple) ) or not all( isinstance(A__ ,A__ ) for number in numbers ): raise ValueError("numbers must be an iterable of integers" ) UpperCAmelCase_ : List[str] = numbers[0] for i in range(1 ,len(A__ ) ): # update the maximum and minimum subarray products UpperCAmelCase_ : Optional[int] = numbers[i] if number < 0: UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = min_till_now, max_till_now UpperCAmelCase_ : Any = max(A__ ,max_till_now * number ) UpperCAmelCase_ : List[Any] = min(A__ ,min_till_now * number ) # update the maximum product found till now UpperCAmelCase_ : List[str] = max(A__ ,A__ ) return max_prod
268
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ (__A ): def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = proj_size UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ ) UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size ) UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] ) UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class UpperCamelCase_ (nn.Module ): def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple: super().__init__() UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5 UpperCAmelCase_ : Optional[Any] = config.hidden_size UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) ] ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str: for block in self.blocks: UpperCAmelCase_ : int = block(lowerCAmelCase_ ) return hidden_states
268
1
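The routine two rows above implements the maximum product subarray trick of tracking both the largest and the smallest running product, because a negative factor can turn the smallest product into the largest one. A self-contained sketch with a few worked inputs follows; max_product_subarray and the sample lists are illustrative choices, not taken from the row.

# Illustrative sketch of the maximum product subarray.
def max_product_subarray(numbers):
    if not numbers:
        return 0
    max_till_now = min_till_now = max_prod = numbers[0]
    for number in numbers[1:]:
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        max_prod = max(max_prod, max_till_now)
    return max_prod


if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
    print(max_product_subarray([-2, 0, -1]))    # 0
    print(max_product_subarray([-4, -3, -2]))   # 12, from the subarray [-4, -3]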
"""simple docstring""" # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import platform import numpy as np import psutil import torch from accelerate import __version__ as version from accelerate.commands.config import default_config_file, load_config_from_file from ..utils import is_npu_available, is_xpu_available def snake_case ( A__=None ): if subparsers is not None: UpperCAmelCase_ : List[Any] = subparsers.add_parser("env" ) else: UpperCAmelCase_ : str = argparse.ArgumentParser("Accelerate env command" ) parser.add_argument( "--config_file" ,default=A__ ,help="The config file to use for the default values in the launching script." ) if subparsers is not None: parser.set_defaults(func=A__ ) return parser def snake_case ( A__ ): UpperCAmelCase_ : int = torch.__version__ UpperCAmelCase_ : Union[str, Any] = torch.cuda.is_available() UpperCAmelCase_ : Optional[int] = is_xpu_available() UpperCAmelCase_ : Any = is_npu_available() UpperCAmelCase_ : Dict = "Not found" # Get the default from the config file. if args.config_file is not None or os.path.isfile(A__ ): UpperCAmelCase_ : int = load_config_from_file(args.config_file ).to_dict() UpperCAmelCase_ : List[str] = { "`Accelerate` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Numpy version": np.__version__, "PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""", "PyTorch XPU available": str(A__ ), "PyTorch NPU available": str(A__ ), "System RAM": F"""{psutil.virtual_memory().total / 10_24 ** 3:.2f} GB""", } if pt_cuda_available: UpperCAmelCase_ : Any = torch.cuda.get_device_name() print("\nCopy-and-paste the text below in your GitHub issue\n" ) print("\n".join([F"""- {prop}: {val}""" for prop, val in info.items()] ) ) print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" ) UpperCAmelCase_ : Tuple = ( "\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(A__ ,A__ ) else F"""\t{accelerate_config}""" ) print(A__ ) UpperCAmelCase_ : Optional[int] = accelerate_config return info def snake_case ( ): UpperCAmelCase_ : str = env_command_parser() UpperCAmelCase_ : List[str] = parser.parse_args() env_command(A__ ) return 0 if __name__ == "__main__": raise SystemExit(main())
268
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(__A ) class UpperCamelCase_ (__A ): def __init__( self : int , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int]=None ) -> List[Any]: UpperCAmelCase_ : str = {} if top_k is not None: UpperCAmelCase_ : List[str] = top_k return {}, {}, postprocess_params def __call__( self : str , lowerCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase_ : Any ) -> Tuple: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str ) -> Any: UpperCAmelCase_ : Tuple = load_image(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) return model_inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : Any = self.model(**lowerCAmelCase_ ) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=5 ) -> Any: if top_k > self.model.config.num_labels: UpperCAmelCase_ : int = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ : str = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(lowerCAmelCase_ ) elif self.framework == "tf": UpperCAmelCase_ : str = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase_ : Union[str, Any] = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCAmelCase_ : int = scores.tolist() UpperCAmelCase_ : Optional[Any] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
268
1
"""simple docstring""" import os from collections import deque import torch from torch.utils.data import Dataset class UpperCamelCase_ (__A ): def __init__( self : List[str] , lowerCAmelCase_ : Optional[Any]="" , lowerCAmelCase_ : Optional[int]="train" ) -> Union[str, Any]: assert os.path.isdir(lowerCAmelCase_ ) UpperCAmelCase_ : Any = [] UpperCAmelCase_ : List[str] = os.listdir(lowerCAmelCase_ ) for story_filename in story_filenames_list: if "summary" in story_filename: continue UpperCAmelCase_ : Any = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) if not os.path.isfile(lowerCAmelCase_ ): continue self.documents.append(lowerCAmelCase_ ) def __len__( self : Union[str, Any] ) -> int: return len(self.documents ) def __getitem__( self : List[str] , lowerCAmelCase_ : Any ) -> List[Any]: UpperCAmelCase_ : Optional[int] = self.documents[idx] UpperCAmelCase_ : str = document_path.split("/" )[-1] with open(lowerCAmelCase_ , encoding="utf-8" ) as source: UpperCAmelCase_ : int = source.read() UpperCAmelCase_ , UpperCAmelCase_ : Tuple = process_story(lowerCAmelCase_ ) return document_name, story_lines, summary_lines def snake_case ( A__ ): UpperCAmelCase_ : Dict = list(filter(lambda A__ : len(A__ ) != 0 ,[line.strip() for line in raw_story.split("\n" )] ) ) # for some unknown reason some lines miss a period, add it UpperCAmelCase_ : int = [_add_missing_period(A__ ) for line in nonempty_lines] # gather article lines UpperCAmelCase_ : List[Any] = [] UpperCAmelCase_ : str = deque(A__ ) while True: try: UpperCAmelCase_ : Any = lines.popleft() if element.startswith("@highlight" ): break story_lines.append(A__ ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines UpperCAmelCase_ : Dict = list(filter(lambda A__ : not t.startswith("@highlight" ) ,A__ ) ) return story_lines, summary_lines def snake_case ( A__ ): UpperCAmelCase_ : Optional[Any] = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"] if line.startswith("@highlight" ): return line if line[-1] in END_TOKENS: return line return line + "." def snake_case ( A__ ,A__ ,A__ ): if len(A__ ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(A__ )) ) return sequence def snake_case ( A__ ,A__ ): UpperCAmelCase_ : List[str] = torch.ones_like(A__ ) UpperCAmelCase_ : Tuple = sequence == pad_token_id UpperCAmelCase_ : List[str] = 0 return mask def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Any = [tokenizer.encode(A__ ) for line in story_lines] UpperCAmelCase_ : List[Any] = [token for sentence in story_lines_token_ids for token in sentence] UpperCAmelCase_ : str = [tokenizer.encode(A__ ) for line in summary_lines] UpperCAmelCase_ : int = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Tuple = [] for sequence in batch: UpperCAmelCase_ : Union[str, Any] = -1 UpperCAmelCase_ : Optional[int] = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(A__ ) return torch.tensor(A__ )
268
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class UpperCamelCase_ (__A ): __magic_name__ = '''detr''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Dict , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=100 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : str="resnet50" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) UpperCAmelCase_ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Dict = backbone_config.get("model_type" ) UpperCAmelCase_ : Dict = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Tuple = config_class.from_dict(lowerCAmelCase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : str = use_timm_backbone UpperCAmelCase_ : Optional[Any] = backbone_config UpperCAmelCase_ : Tuple = num_channels UpperCAmelCase_ : Dict = num_queries UpperCAmelCase_ : str = d_model UpperCAmelCase_ : Any = encoder_ffn_dim UpperCAmelCase_ : Union[str, Any] = encoder_layers UpperCAmelCase_ : Optional[int] = encoder_attention_heads UpperCAmelCase_ : List[str] = decoder_ffn_dim UpperCAmelCase_ : Tuple = decoder_layers UpperCAmelCase_ : Optional[int] = decoder_attention_heads UpperCAmelCase_ : List[Any] = dropout UpperCAmelCase_ : Union[str, Any] = attention_dropout UpperCAmelCase_ : int = activation_dropout UpperCAmelCase_ : List[str] = activation_function UpperCAmelCase_ : Optional[int] = init_std UpperCAmelCase_ : Union[str, Any] = init_xavier_std UpperCAmelCase_ : List[str] = encoder_layerdrop UpperCAmelCase_ : Tuple = decoder_layerdrop UpperCAmelCase_ : str = encoder_layers UpperCAmelCase_ : Any = auxiliary_loss UpperCAmelCase_ : Optional[int] = position_embedding_type UpperCAmelCase_ : List[str] = backbone UpperCAmelCase_ : int = use_pretrained_backbone UpperCAmelCase_ : Any = dilation # Hungarian matcher UpperCAmelCase_ : str = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : List[str] = mask_loss_coefficient UpperCAmelCase_ : Dict = dice_loss_coefficient UpperCAmelCase_ : Any = bbox_loss_coefficient UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient UpperCAmelCase_ : int = eos_coefficient super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.encoder_attention_heads @property def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return self.d_model @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple ) -> List[Any]: return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, any]: UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : Any = self.__class__.model_type return output class UpperCamelCase_ (__A ): __magic_name__ = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float: return 1e-5 @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return 12
268
1
"""simple docstring""" from math import sqrt import numpy as np from sympy import symbols # Coefficient # Speed of light (m/s) lowerCamelCase_ = 2_9979_2458 # Symbols lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = symbols('''ct x y z''') def snake_case ( A__ ): if velocity > c: raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" ) elif velocity < 1: # Usually the speed should be much higher than 1 (c order of magnitude) raise ValueError("Speed must be greater than or equal to 1!" ) return velocity / c def snake_case ( A__ ): return 1 / sqrt(1 - beta(A__ ) ** 2 ) def snake_case ( A__ ): return np.array( [ [gamma(A__ ), -gamma(A__ ) * beta(A__ ), 0, 0], [-gamma(A__ ) * beta(A__ ), gamma(A__ ), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], ] ) def snake_case ( A__ ,A__ = None ): # Ensure event is not empty if event is None: UpperCAmelCase_ : Optional[Any] = np.array([ct, x, y, z] ) # Symbolic four vector else: event[0] *= c # x0 is ct (speed of light * time) return transformation_matrix(A__ ) @ event if __name__ == "__main__": import doctest doctest.testmod() # Example of symbolic vector: lowerCamelCase_ = transform(2997_9245) print('''Example of four vector: ''') print(f'ct\' = {four_vector[0]}') print(f'x\' = {four_vector[1]}') print(f'y\' = {four_vector[2]}') print(f'z\' = {four_vector[3]}') # Substitute symbols with numerical values lowerCamelCase_ = {ct: c, x: 1, y: 1, z: 1} lowerCamelCase_ = [four_vector[i].subs(sub_dict) for i in range(4)] print(f'\n{numerical_vector}')
268
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = BertTokenizer __magic_name__ = BertTokenizerFast __magic_name__ = True __magic_name__ = True __magic_name__ = filter_non_english def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running" UpperCAmelCase_ : Any = "unwanted, running" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: if not self.test_rust_tokenizer: return UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = 
rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: UpperCAmelCase_ : Optional[Any] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def _SCREAMING_SNAKE_CASE ( self : int ) -> int: UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_ : Tuple = BasicTokenizer() UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't." 
UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."] self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] UpperCAmelCase_ : Tuple = {} for i, token in enumerate(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = i UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.get_tokenizer() UpperCAmelCase_ : List[str] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" ) UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence.""" UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus( lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , ) UpperCAmelCase_ : Optional[int] = 
tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False UpperCAmelCase_ : List[Any] = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : List[Any] = ["的", "人", "有"] UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase_ : Optional[Any] = True UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = False UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ ) # it is expected that only the first Chinese character is not preceded by "##". UpperCAmelCase_ : Tuple = [ f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ ) ] self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
268
1
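The tokenizer test in the cell above builds a toy vocabulary and checks greedy longest-match-first WordPiece splitting ("unwanted" -> un / ##want / ##ed, unknown material -> [UNK]). A minimal, self-contained sketch of that matching rule follows; the function name and toy vocab are illustrative and not the transformers API.

def wordpiece(token, vocab, unk="[UNK]"):
    # Greedy longest-match-first subword split; non-initial pieces carry a "##" prefix.
    pieces, start = [], 0
    while start < len(token):
        end = len(token)
        matched = None
        while end > start:
            piece = token[start:end] if start == 0 else "##" + token[start:end]
            if piece in vocab:
                matched = piece
                break
            end -= 1
        if matched is None:  # no subword matched -> the whole token is unknown
            return [unk]
        pieces.append(matched)
        start = end
    return pieces


toy_vocab = {"un", "##want", "##ed", "runn", "##ing"}
print(wordpiece("unwanted", toy_vocab))   # ['un', '##want', '##ed']
print(wordpiece("unwantedX", toy_vocab))  # ['[UNK]']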
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ : def __init__( self : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : List[Any]=32 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Any=16 , lowerCAmelCase_ : int=[32, 64, 128] , lowerCAmelCase_ : str=[1, 2, 1] , lowerCAmelCase_ : Optional[Any]=[2, 2, 4] , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : int=2.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Any=0.0 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str="gelu" , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : int=True , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Optional[int]=1e-5 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=10 , lowerCAmelCase_ : Dict=8 , lowerCAmelCase_ : Optional[Any]=["stage1", "stage2"] , lowerCAmelCase_ : Optional[Any]=[1, 2] , ) -> int: UpperCAmelCase_ : List[str] = parent UpperCAmelCase_ : Optional[Any] = batch_size UpperCAmelCase_ : Tuple = image_size UpperCAmelCase_ : Optional[Any] = patch_size UpperCAmelCase_ : Dict = num_channels UpperCAmelCase_ : Union[str, Any] = embed_dim UpperCAmelCase_ : Tuple = hidden_sizes UpperCAmelCase_ : Dict = depths UpperCAmelCase_ : Union[str, Any] = num_heads UpperCAmelCase_ : str = window_size UpperCAmelCase_ : List[str] = mlp_ratio UpperCAmelCase_ : int = qkv_bias UpperCAmelCase_ : int = hidden_dropout_prob UpperCAmelCase_ : int = attention_probs_dropout_prob UpperCAmelCase_ : Optional[Any] = drop_path_rate UpperCAmelCase_ : List[str] = hidden_act UpperCAmelCase_ : List[Any] = use_absolute_embeddings UpperCAmelCase_ : Union[str, Any] = patch_norm UpperCAmelCase_ : str = layer_norm_eps UpperCAmelCase_ : List[Any] = initializer_range UpperCAmelCase_ : Optional[int] = is_training UpperCAmelCase_ : str = scope UpperCAmelCase_ : Tuple = use_labels UpperCAmelCase_ : int = type_sequence_label_size UpperCAmelCase_ : int = encoder_stride UpperCAmelCase_ : List[str] = out_features UpperCAmelCase_ : Any = out_indices def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]: UpperCAmelCase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : str = None if self.use_labels: UpperCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : int = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: return FocalNetConfig( image_size=self.image_size , 
patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str ) -> Tuple: UpperCAmelCase_ : Any = FocalNetModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] ) -> Dict: UpperCAmelCase_ : List[str] = FocalNetBackbone(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Optional[Any] = FocalNetBackbone(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : str = FocalNetForMaskedImageModeling(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase_ : Optional[int] = 1 UpperCAmelCase_ : int = FocalNetForMaskedImageModeling(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , 
lowerCAmelCase_ : Dict ) -> Optional[Any]: UpperCAmelCase_ : Dict = self.type_sequence_label_size UpperCAmelCase_ : Dict = FocalNetForImageClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase_ : Dict = 1 UpperCAmelCase_ : Any = FocalNetForImageClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = model(lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = config_and_inputs UpperCAmelCase_ : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) __magic_name__ = ( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = FocalNetModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase_ , embed_dim=37 , has_text_modality=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: return def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @unittest.skip(reason="FocalNet does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: pass @unittest.skip(reason="FocalNet does not use feedforward chunking" ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: UpperCAmelCase_ , 
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[int] = model_class(lowerCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase_ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Optional[int] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : int = [*signature.parameters.keys()] UpperCAmelCase_ : List[str] = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any ) -> Dict: UpperCAmelCase_ : List[str] = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): UpperCAmelCase_ : int = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) UpperCAmelCase_ : Optional[Any] = outputs.hidden_states UpperCAmelCase_ : Tuple = getattr( self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) # FocalNet has a different seq_length UpperCAmelCase_ : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) UpperCAmelCase_ : Any = outputs.reshaped_hidden_states self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = reshaped_hidden_states[0].shape UpperCAmelCase_ : Union[str, Any] = ( reshaped_hidden_states[0].view(lowerCAmelCase_ , lowerCAmelCase_ , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : Tuple = True self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : int = True self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : List[Any] = 3 UpperCAmelCase_ : List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , 
collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase_ : Optional[Any] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase_ : Any = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase_ : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: UpperCAmelCase_ : int = True self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True self.check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , (padded_height, padded_width) ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Tuple = FocalNetModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str ) -> str: UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : str = _config_zero_init(lowerCAmelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : List[Any] = model_class(config=lowerCAmelCase_ ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: # TODO update organization return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None @slow def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: UpperCAmelCase_ : Tuple = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(lowerCAmelCase_ ) UpperCAmelCase_ : str = self.default_image_processor UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : List[str] = model(**lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : Optional[int] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Any = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = (FocalNetBackbone,) if is_torch_available() else () __magic_name__ = FocalNetConfig __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: UpperCAmelCase_ : List[str] = FocalNetModelTester(self )
268
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''t5''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int: UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[Any] = d_model UpperCAmelCase_ : str = d_kv UpperCAmelCase_ : Any = d_ff UpperCAmelCase_ : int = num_layers UpperCAmelCase_ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : Optional[Any] = num_heads UpperCAmelCase_ : Any = relative_attention_num_buckets UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance UpperCAmelCase_ : Optional[Any] = dropout_rate UpperCAmelCase_ : Tuple = layer_norm_epsilon UpperCAmelCase_ : int = initializer_factor UpperCAmelCase_ : int = feed_forward_proj UpperCAmelCase_ : str = use_cache UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" ) UpperCAmelCase_ : List[Any] = act_info[-1] UpperCAmelCase_ : Optional[int] = act_info[0] == "gated" if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : int = "gelu_new" super().__init__( pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , ) class UpperCamelCase_ (__A ): @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : Any = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence" UpperCAmelCase_ : Union[str, Any] = {0: "batch"} UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" ) return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: return 13
268
1
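The T5 config cell above derives the dense activation name and a "gated" flag from the feed_forward_proj string, then remaps "gated-gelu" to "gelu_new" for backwards compatibility. A small sketch of that parsing rule; the helper name is hypothetical.

def parse_feed_forward_proj(feed_forward_proj):
    # "gated-gelu" -> ("gelu", True); "relu" -> ("relu", False); anything else malformed raises.
    parts = feed_forward_proj.split("-")
    act_fn = parts[-1]
    is_gated = parts[0] == "gated"
    if (len(parts) > 1 and parts[0] != "gated") or len(parts) > 2:
        raise ValueError(f"{feed_forward_proj} is not a valid activation function of the dense layer.")
    return act_fn, is_gated


print(parse_feed_forward_proj("gated-gelu"))  # ('gelu', True)
print(parse_feed_forward_proj("relu"))        # ('relu', False)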
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) lowerCamelCase_ = { '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GPTBigCodeForSequenceClassification''', '''GPTBigCodeForTokenClassification''', '''GPTBigCodeForCausalLM''', '''GPTBigCodeModel''', '''GPTBigCodePreTrainedModel''', ] if TYPE_CHECKING: from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_bigcode import ( GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST, GPTBigCodeForCausalLM, GPTBigCodeForSequenceClassification, GPTBigCodeForTokenClassification, GPTBigCodeModel, GPTBigCodePreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
268
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class UpperCamelCase_ : # setable values __magic_name__ = None __magic_name__ = None __magic_name__ = None # sigma(t_i) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Optional[Any]: return cls() @dataclass class UpperCamelCase_ (__A ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 class UpperCamelCase_ (__A , __A ): @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: return True @register_to_config def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: return KarrasVeSchedulerState.create() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState: UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy() UpperCAmelCase_ : Dict = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]: if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: UpperCAmelCase_ : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 ) UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape ) UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : str = sample_prev + sigma_prev * 
model_output UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict: raise NotImplementedError()
268
1
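The set_timesteps method in the scheduler cell above builds its schedule by geometric interpolation between sigma_max**2 and sigma_min**2. A plain-Python sketch of that interpolation formula, ignoring the reversed timestep ordering and JAX arrays; the function name and default values are illustrative.

def karras_ve_schedule(num_inference_steps, sigma_min=0.02, sigma_max=100.0):
    # Geometric interpolation: value at step 0 is sigma_max**2, at the last step sigma_min**2.
    return [
        sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
        for i in range(num_inference_steps)
    ]


print([round(s, 4) for s in karras_ve_schedule(5)])
# [10000.0, 141.4214, 2.0, 0.0283, 0.0004]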
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch lowerCamelCase_ = True except ImportError: lowerCamelCase_ = False try: from torch.hub import _get_torch_home lowerCamelCase_ = _get_torch_home() except ImportError: lowerCamelCase_ = os.path.expanduser( os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch''')) ) lowerCamelCase_ = os.path.join(torch_cache_home, '''transformers''') lowerCamelCase_ = '''https://cdn.huggingface.co''' lowerCamelCase_ = '''https://s3.amazonaws.com/models.huggingface.co/bert''' lowerCamelCase_ = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1]) lowerCamelCase_ = os.path.join(PATH, '''config.yaml''') lowerCamelCase_ = os.path.join(PATH, '''attributes.txt''') lowerCamelCase_ = os.path.join(PATH, '''objects.txt''') lowerCamelCase_ = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path) lowerCamelCase_ = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE) lowerCamelCase_ = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE) lowerCamelCase_ = '''pytorch_model.bin''' lowerCamelCase_ = '''config.yaml''' def snake_case ( A__=OBJECTS ,A__=ATTRIBUTES ): UpperCAmelCase_ : Optional[int] = [] with open(A__ ) as f: for object in f.readlines(): vg_classes.append(object.split("," )[0].lower().strip() ) UpperCAmelCase_ : Dict = [] with open(A__ ) as f: for object in f.readlines(): vg_attrs.append(object.split("," )[0].lower().strip() ) return vg_classes, vg_attrs def snake_case ( A__ ): UpperCAmelCase_ : List[Any] = OrderedDict() with open(A__ ,"rb" ) as f: UpperCAmelCase_ : Tuple = pkl.load(A__ )["model"] for k in copy.deepcopy(list(ckp.keys() ) ): UpperCAmelCase_ : List[str] = ckp.pop(A__ ) if isinstance(A__ ,np.ndarray ): UpperCAmelCase_ : Optional[int] = torch.tensor(A__ ) else: assert isinstance(A__ ,torch.tensor ), type(A__ ) UpperCAmelCase_ : Dict = v return r class UpperCamelCase_ : __magic_name__ = {} def __init__( self : List[str] , lowerCAmelCase_ : dict , lowerCAmelCase_ : str = "root" , lowerCAmelCase_ : Any=0 ) -> Any: UpperCAmelCase_ : Optional[Any] = name UpperCAmelCase_ : int = level UpperCAmelCase_ : Optional[Any] = {} for k, v in dictionary.items(): if v is None: raise ValueError() UpperCAmelCase_ : Tuple = copy.deepcopy(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = copy.deepcopy(lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Optional[Any] = Config(lowerCAmelCase_ , name=lowerCAmelCase_ , level=level + 1 ) UpperCAmelCase_ : Optional[int] = v setattr(self , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = d def __repr__( self : Any ) -> List[Any]: return str(list((self._pointer.keys()) ) ) def __setattr__( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Optional[Any] = val UpperCAmelCase_ : List[str] = val UpperCAmelCase_ : Optional[int] = key.split("." 
) UpperCAmelCase_ : Union[str, Any] = len(lowerCAmelCase_ ) - 1 UpperCAmelCase_ : str = self._pointer if len(lowerCAmelCase_ ) > 1: for i, l in enumerate(lowerCAmelCase_ ): if hasattr(self , lowerCAmelCase_ ) and isinstance(getattr(self , lowerCAmelCase_ ) , lowerCAmelCase_ ): setattr(getattr(self , lowerCAmelCase_ ) , ".".join(levels[i:] ) , lowerCAmelCase_ ) if l == last_level: UpperCAmelCase_ : str = val else: UpperCAmelCase_ : int = pointer[l] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: return self._pointer def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict ) -> Dict: with open(f"""{file_name}""" , "w" ) as stream: dump(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> Any: with open(f"""{file_name}""" , "w" ) as stream: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : Any ) -> Optional[Any]: with open(lowerCAmelCase_ ) as stream: UpperCAmelCase_ : Any = load(lowerCAmelCase_ , Loader=lowerCAmelCase_ ) return data def __str__( self : str ) -> int: UpperCAmelCase_ : Any = " " if self._name != "root": UpperCAmelCase_ : Dict = f"""{t * (self._level-1)}{self._name}:\n""" else: UpperCAmelCase_ : Any = "" UpperCAmelCase_ : int = self._level for i, (k, v) in enumerate(self._pointer.items() ): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): r += f"""{t * (self._level)}{v}\n""" self._level += 1 else: r += f"""{t * (self._level)}{k}: {v} ({type(lowerCAmelCase_ ).__name__})\n""" UpperCAmelCase_ : List[str] = level return r[:-1] @classmethod def _SCREAMING_SNAKE_CASE ( cls : int , lowerCAmelCase_ : str , **lowerCAmelCase_ : str ) -> int: UpperCAmelCase_ , UpperCAmelCase_ : str = cls.get_config_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) return cls(lowerCAmelCase_ ) @classmethod def _SCREAMING_SNAKE_CASE ( cls : str , lowerCAmelCase_ : str , **lowerCAmelCase_ : List[str] ) -> int: UpperCAmelCase_ : int = kwargs.pop("cache_dir" , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = kwargs.pop("force_download" , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = kwargs.pop("resume_download" , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = kwargs.pop("proxies" , lowerCAmelCase_ ) UpperCAmelCase_ : Any = kwargs.pop("local_files_only" , lowerCAmelCase_ ) if os.path.isdir(lowerCAmelCase_ ): UpperCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) elif os.path.isfile(lowerCAmelCase_ ) or is_remote_url(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[Any] = pretrained_model_name_or_path else: UpperCAmelCase_ : str = hf_bucket_url(lowerCAmelCase_ , filename=lowerCAmelCase_ , use_cdn=lowerCAmelCase_ ) try: # Load from URL or cache if already cached UpperCAmelCase_ : Optional[Any] = cached_path( lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , ) # Load config dict if resolved_config_file is None: raise EnvironmentError UpperCAmelCase_ : Optional[Any] = Config.load_yaml(lowerCAmelCase_ ) except EnvironmentError: UpperCAmelCase_ : Union[str, Any] = "Can't load config for" raise EnvironmentError(lowerCAmelCase_ ) if resolved_config_file == config_file: print("loading configuration file from path" ) else: print("loading configuration file cache" ) return Config.load_yaml(lowerCAmelCase_ ), kwargs def snake_case ( A__ ): UpperCAmelCase_ : Dict = torch.load("dump.pt" 
,map_location=in_tensor.device ) UpperCAmelCase_ : Tuple = in_tensor.numpy() UpperCAmelCase_ : Any = out_tensor.numpy()[0] print(na.shape ,na[0, 0, :5] ) print(na.shape ,na[0, 0, :5] ) assert np.allclose(A__ ,A__ ,rtol=0.01 ,atol=0.1 ), ( F"""{sum([1 for x in np.isclose(A__ ,A__ ,rtol=0.01 ,atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %""" " element-wise mismatch" ) raise Exception("tensors are all good" ) # Hugging face functions below def snake_case ( A__ ): UpperCAmelCase_ : Any = urlparse(A__ ) return parsed.scheme in ("http", "https") def snake_case ( A__ ,A__ ,A__=True ): UpperCAmelCase_ : Optional[int] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX UpperCAmelCase_ : Dict = "/" not in model_id if legacy_format: return F"""{endpoint}/{model_id}-{filename}""" else: return F"""{endpoint}/{model_id}/{filename}""" def snake_case ( A__ ,A__ ,A__=None ,A__=0 ,A__=None ,): UpperCAmelCase_ : Any = "python/{}".format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(A__ ,A__ ): ua += "; " + "; ".join("{}/{}".format(A__ ,A__ ) for k, v in user_agent.items() ) elif isinstance(A__ ,A__ ): ua += "; " + user_agent UpperCAmelCase_ : Any = {"user-agent": ua} if resume_size > 0: UpperCAmelCase_ : Tuple = "bytes=%d-" % (resume_size,) UpperCAmelCase_ : Optional[int] = requests.get(A__ ,stream=A__ ,proxies=A__ ,headers=A__ ) if response.status_code == 4_16: # Range not satisfiable return UpperCAmelCase_ : Union[str, Any] = response.headers.get("Content-Length" ) UpperCAmelCase_ : List[Any] = resume_size + int(A__ ) if content_length is not None else None UpperCAmelCase_ : Tuple = tqdm( unit="B" ,unit_scale=A__ ,total=A__ ,initial=A__ ,desc="Downloading" ,) for chunk in response.iter_content(chunk_size=10_24 ): if chunk: # filter out keep-alive new chunks progress.update(len(A__ ) ) temp_file.write(A__ ) progress.close() def snake_case ( A__ ,A__=None ,A__=False ,A__=None ,A__=10 ,A__=False ,A__=None ,A__=False ,): if cache_dir is None: UpperCAmelCase_ : Dict = TRANSFORMERS_CACHE if isinstance(A__ ,A__ ): UpperCAmelCase_ : Any = str(A__ ) os.makedirs(A__ ,exist_ok=A__ ) UpperCAmelCase_ : Optional[Any] = None if not local_files_only: try: UpperCAmelCase_ : Dict = requests.head(A__ ,allow_redirects=A__ ,proxies=A__ ,timeout=A__ ) if response.status_code == 2_00: UpperCAmelCase_ : Optional[Any] = response.headers.get("ETag" ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass UpperCAmelCase_ : Tuple = url_to_filename(A__ ,A__ ) # get cache path to put the file UpperCAmelCase_ : List[str] = os.path.join(A__ ,A__ ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(A__ ): return cache_path else: UpperCAmelCase_ : Dict = [ file for file in fnmatch.filter(os.listdir(A__ ) ,filename + ".*" ) if not file.endswith(".json" ) and not file.endswith(".lock" ) ] if len(A__ ) > 0: return os.path.join(A__ ,matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. 
if os.path.exists(A__ ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. UpperCAmelCase_ : List[Any] = cache_path + ".lock" with FileLock(A__ ): # If the download just completed while the lock was activated. if os.path.exists(A__ ) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: UpperCAmelCase_ : Optional[Any] = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(A__ ,"a+b" ) as f: yield f UpperCAmelCase_ : List[Any] = _resumable_file_manager if os.path.exists(A__ ): UpperCAmelCase_ : Dict = os.stat(A__ ).st_size else: UpperCAmelCase_ : Optional[int] = 0 else: UpperCAmelCase_ : List[Any] = partial(tempfile.NamedTemporaryFile ,dir=A__ ,delete=A__ ) UpperCAmelCase_ : Dict = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s" ,A__ ,temp_file.name ,) http_get( A__ ,A__ ,proxies=A__ ,resume_size=A__ ,user_agent=A__ ,) os.replace(temp_file.name ,A__ ) UpperCAmelCase_ : Any = {"url": url, "etag": etag} UpperCAmelCase_ : int = cache_path + ".json" with open(A__ ,"w" ) as meta_file: json.dump(A__ ,A__ ) return cache_path def snake_case ( A__ ,A__=None ): UpperCAmelCase_ : List[str] = url.encode("utf-8" ) UpperCAmelCase_ : List[Any] = shaaaa(A__ ) UpperCAmelCase_ : Dict = url_hash.hexdigest() if etag: UpperCAmelCase_ : Dict = etag.encode("utf-8" ) UpperCAmelCase_ : Any = shaaaa(A__ ) filename += "." + etag_hash.hexdigest() if url.endswith(".h5" ): filename += ".h5" return filename def snake_case ( A__ ,A__=None ,A__=False ,A__=None ,A__=False ,A__=None ,A__=False ,A__=False ,A__=False ,): if cache_dir is None: UpperCAmelCase_ : Dict = TRANSFORMERS_CACHE if isinstance(A__ ,A__ ): UpperCAmelCase_ : Dict = str(A__ ) if isinstance(A__ ,A__ ): UpperCAmelCase_ : Dict = str(A__ ) if is_remote_url(A__ ): # URL, so get it from the cache (downloading if necessary) UpperCAmelCase_ : Union[str, Any] = get_from_cache( A__ ,cache_dir=A__ ,force_download=A__ ,proxies=A__ ,resume_download=A__ ,user_agent=A__ ,local_files_only=A__ ,) elif os.path.exists(A__ ): # File, and it exists. UpperCAmelCase_ : Any = url_or_filename elif urlparse(A__ ).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(A__ ) ) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(A__ ) ) if extract_compressed_file: if not is_zipfile(A__ ) and not tarfile.is_tarfile(A__ ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" UpperCAmelCase_ , UpperCAmelCase_ : Any = os.path.split(A__ ) UpperCAmelCase_ : Tuple = output_file.replace("." 
,"-" ) + "-extracted" UpperCAmelCase_ : List[Any] = os.path.join(A__ ,A__ ) if os.path.isdir(A__ ) and os.listdir(A__ ) and not force_extract: return output_path_extracted # Prevent parallel extractions UpperCAmelCase_ : str = output_path + ".lock" with FileLock(A__ ): shutil.rmtree(A__ ,ignore_errors=A__ ) os.makedirs(A__ ) if is_zipfile(A__ ): with ZipFile(A__ ,"r" ) as zip_file: zip_file.extractall(A__ ) zip_file.close() elif tarfile.is_tarfile(A__ ): UpperCAmelCase_ : Optional[int] = tarfile.open(A__ ) tar_file.extractall(A__ ) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(A__ ) ) return output_path_extracted return output_path def snake_case ( A__ ,A__="," ): assert isinstance(A__ ,A__ ) if os.path.isfile(A__ ): with open(A__ ) as f: UpperCAmelCase_ : str = eval(f.read() ) else: UpperCAmelCase_ : int = requests.get(A__ ) try: UpperCAmelCase_ : int = requests.json() except Exception: UpperCAmelCase_ : Dict = req.content.decode() assert data is not None, "could not connect" try: UpperCAmelCase_ : Tuple = eval(A__ ) except Exception: UpperCAmelCase_ : Tuple = data.split("\n" ) req.close() return data def snake_case ( A__ ): UpperCAmelCase_ : Tuple = requests.get(A__ ) UpperCAmelCase_ : List[Any] = np.array(Image.open(BytesIO(response.content ) ) ) return img def snake_case ( A__ ): UpperCAmelCase_ : List[Any] = url.split("/" )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(A__ ) with open(A__ ,"rb" ) as stream: UpperCAmelCase_ : int = pkl.load(A__ ) UpperCAmelCase_ : Dict = weights.pop("model" ) UpperCAmelCase_ : Dict = {} for k, v in model.items(): UpperCAmelCase_ : Tuple = torch.from_numpy(A__ ) if "running_var" in k: UpperCAmelCase_ : Dict = torch.tensor([0] ) UpperCAmelCase_ : Union[str, Any] = k.replace("running_var" ,"num_batches_tracked" ) UpperCAmelCase_ : Tuple = zero return new def snake_case ( ): print(F"""{os.path.abspath(os.path.join(A__ ,os.pardir ) )}/demo.ipynb""" ) def snake_case ( A__ ,A__="RGB" ): assert isinstance(A__ ,A__ ) if os.path.isfile(A__ ): UpperCAmelCase_ : Tuple = cva.imread(A__ ) else: UpperCAmelCase_ : Tuple = get_image_from_url(A__ ) assert img is not None, F"""could not connect to: {im}""" UpperCAmelCase_ : Any = cva.cvtColor(A__ ,cva.COLOR_BGR2RGB ) if input_format == "RGB": UpperCAmelCase_ : Tuple = img[:, :, ::-1] return img def snake_case ( A__ ,A__=1 ): return (images[i : i + batch] for i in range(0 ,len(A__ ) ,A__ ))
268
"""simple docstring""" def snake_case ( A__ ,A__ ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) UpperCAmelCase_ : Dict = (boundary[1] - boundary[0]) / steps UpperCAmelCase_ : Optional[int] = boundary[0] UpperCAmelCase_ : str = boundary[1] UpperCAmelCase_ : Tuple = make_points(A__ ,A__ ,A__ ) UpperCAmelCase_ : List[str] = 0.0 y += (h / 2.0) * f(A__ ) for i in x_i: # print(i) y += h * f(A__ ) y += (h / 2.0) * f(A__ ) return y def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Union[str, Any] = a + h while x < (b - h): yield x UpperCAmelCase_ : Optional[Any] = x + h def snake_case ( A__ ): # enter your function here UpperCAmelCase_ : Dict = (x - 0) * (x - 0) return y def snake_case ( ): UpperCAmelCase_ : Dict = 0.0 # Lower bound of integration UpperCAmelCase_ : Optional[int] = 1.0 # Upper bound of integration UpperCAmelCase_ : Dict = 10.0 # define number of steps or resolution UpperCAmelCase_ : List[Any] = [a, b] # define boundary of integration UpperCAmelCase_ : Union[str, Any] = method_a(A__ ,A__ ) print(F"""y = {y}""" ) if __name__ == "__main__": main()
268
1
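The integration snippet above applies the extended trapezoidal rule. A compact, runnable restatement of the same quadrature; the names are illustrative and not taken from the snippet.

def trapezoidal(f, a, b, steps):
    # Extended trapezoidal rule: h * (f(x0)/2 + f(x1) + ... + f(x_{n-1}) + f(xn)/2).
    h = (b - a) / steps
    total = 0.5 * (f(a) + f(b))
    for k in range(1, steps):
        total += f(a + k * h)
    return h * total


print(trapezoidal(lambda x: x * x, 0.0, 1.0, 10))  # ~0.335, versus the exact value 1/3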
"""simple docstring""" import tensorflow as tf from ...tf_utils import shape_list class UpperCamelCase_ (tf.keras.layers.Layer ): def __init__( self : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : str=False , **lowerCAmelCase_ : Optional[int] ) -> str: super().__init__(**lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : int = d_embed UpperCAmelCase_ : List[Any] = d_proj UpperCAmelCase_ : Tuple = cutoffs + [vocab_size] UpperCAmelCase_ : Any = [0] + self.cutoffs UpperCAmelCase_ : List[Any] = div_val UpperCAmelCase_ : str = self.cutoffs[0] UpperCAmelCase_ : List[str] = len(self.cutoffs ) - 1 UpperCAmelCase_ : Optional[int] = self.shortlist_size + self.n_clusters UpperCAmelCase_ : List[Any] = keep_order UpperCAmelCase_ : Any = [] UpperCAmelCase_ : Any = [] def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> Tuple: if self.n_clusters > 0: UpperCAmelCase_ : Optional[int] = self.add_weight( shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=lowerCAmelCase_ , name="cluster_weight" ) UpperCAmelCase_ : int = self.add_weight( shape=(self.n_clusters,) , initializer="zeros" , trainable=lowerCAmelCase_ , name="cluster_bias" ) if self.div_val == 1: for i in range(len(self.cutoffs ) ): if self.d_proj != self.d_embed: UpperCAmelCase_ : Union[str, Any] = self.add_weight( shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=lowerCAmelCase_ , name=f"""out_projs_._{i}""" , ) self.out_projs.append(lowerCAmelCase_ ) else: self.out_projs.append(lowerCAmelCase_ ) UpperCAmelCase_ : Any = self.add_weight( shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=lowerCAmelCase_ , name=f"""out_layers_._{i}_._weight""" , ) UpperCAmelCase_ : str = self.add_weight( shape=(self.vocab_size,) , initializer="zeros" , trainable=lowerCAmelCase_ , name=f"""out_layers_._{i}_._bias""" , ) self.out_layers.append((weight, bias) ) else: for i in range(len(self.cutoffs ) ): UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1] UpperCAmelCase_ : Optional[Any] = self.d_embed // (self.div_val**i) UpperCAmelCase_ : str = self.add_weight( shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=lowerCAmelCase_ , name=f"""out_projs_._{i}""" ) self.out_projs.append(lowerCAmelCase_ ) UpperCAmelCase_ : int = self.add_weight( shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=lowerCAmelCase_ , name=f"""out_layers_._{i}_._weight""" , ) UpperCAmelCase_ : Optional[Any] = self.add_weight( shape=(r_idx - l_idx,) , initializer="zeros" , trainable=lowerCAmelCase_ , name=f"""out_layers_._{i}_._bias""" , ) self.out_layers.append((weight, bias) ) super().build(lowerCAmelCase_ ) @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any]=None ) -> List[Any]: UpperCAmelCase_ : List[Any] = x if proj is not None: UpperCAmelCase_ : List[str] = tf.einsum("ibd,ed->ibe" , lowerCAmelCase_ , lowerCAmelCase_ ) return tf.einsum("ibd,nd->ibn" , lowerCAmelCase_ , lowerCAmelCase_ ) + b @staticmethod def _SCREAMING_SNAKE_CASE ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] ) -> List[str]: UpperCAmelCase_ : Tuple = shape_list(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tf.range(lp_size[0] , dtype=target.dtype ) UpperCAmelCase_ : int 
= tf.stack([r, target] , 1 ) return tf.gather_nd(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[int]=False ) -> List[Any]: UpperCAmelCase_ : str = 0 if self.n_clusters == 0: UpperCAmelCase_ : Any = self._logit(lowerCAmelCase_ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] ) if target is not None: UpperCAmelCase_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase_ , logits=lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = tf.nn.log_softmax(lowerCAmelCase_ , axis=-1 ) else: UpperCAmelCase_ : str = shape_list(lowerCAmelCase_ ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : str = tf.zeros(hidden_sizes[:2] ) for i in range(len(self.cutoffs ) ): UpperCAmelCase_ , UpperCAmelCase_ : str = self.cutoff_ends[i], self.cutoff_ends[i + 1] if target is not None: UpperCAmelCase_ : List[Any] = (target >= l_idx) & (target < r_idx) UpperCAmelCase_ : Dict = tf.where(lowerCAmelCase_ ) UpperCAmelCase_ : int = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) - l_idx if self.div_val == 1: UpperCAmelCase_ : Optional[int] = self.out_layers[0][0][l_idx:r_idx] UpperCAmelCase_ : Tuple = self.out_layers[0][1][l_idx:r_idx] else: UpperCAmelCase_ : Any = self.out_layers[i][0] UpperCAmelCase_ : Tuple = self.out_layers[i][1] if i == 0: UpperCAmelCase_ : Tuple = tf.concat([cur_W, self.cluster_weight] , 0 ) UpperCAmelCase_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 ) UpperCAmelCase_ : Tuple = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[0] ) UpperCAmelCase_ : Dict = tf.nn.log_softmax(lowerCAmelCase_ ) out.append(head_logprob[..., : self.cutoffs[0]] ) if target is not None: UpperCAmelCase_ : str = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ ) else: UpperCAmelCase_ : Union[str, Any] = self._logit(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , self.out_projs[i] ) UpperCAmelCase_ : int = tf.nn.log_softmax(lowerCAmelCase_ ) UpperCAmelCase_ : int = self.cutoffs[0] + i - 1 # No probability for the head cluster UpperCAmelCase_ : Optional[int] = head_logprob[..., cluster_prob_idx, None] + tail_logprob out.append(lowerCAmelCase_ ) if target is not None: UpperCAmelCase_ : List[Any] = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = tf.boolean_mask(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = self._gather_logprob(lowerCAmelCase_ , lowerCAmelCase_ ) cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1] if target is not None: loss += tf.scatter_nd(lowerCAmelCase_ , -cur_logprob , shape_list(lowerCAmelCase_ ) ) UpperCAmelCase_ : str = tf.concat(lowerCAmelCase_ , axis=-1 ) if target is not None: if return_mean: UpperCAmelCase_ : Union[str, Any] = tf.reduce_mean(lowerCAmelCase_ ) # Add the training-time loss value to the layer using `self.add_loss()`. self.add_loss(lowerCAmelCase_ ) # Log the loss as a metric (we could log arbitrary metrics, # including different metrics for training and inference. self.add_metric(lowerCAmelCase_ , name=self.name , aggregation="mean" if return_mean else "" ) return out
268
"""simple docstring""" from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def snake_case ( A__ ,A__ ,A__ ): if not arr: return None, None, 0 if low == high: return low, high, arr[low] UpperCAmelCase_ : Dict = (low + high) // 2 UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = max_subarray(A__ ,A__ ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = max_subarray(A__ ,mid + 1 ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = max_cross_sum(A__ ,A__ ,A__ ,A__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ , UpperCAmelCase_ : str = float("-inf" ), -1 UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = float("-inf" ), -1 UpperCAmelCase_ : int | float = 0 for i in range(A__ ,low - 1 ,-1 ): summ += arr[i] if summ > left_sum: UpperCAmelCase_ : str = summ UpperCAmelCase_ : Any = i UpperCAmelCase_ : Dict = 0 for i in range(mid + 1 ,high + 1 ): summ += arr[i] if summ > right_sum: UpperCAmelCase_ : List[Any] = summ UpperCAmelCase_ : Optional[Any] = i return max_left, max_right, (left_sum + right_sum) def snake_case ( A__ ): UpperCAmelCase_ : str = [randint(1 ,A__ ) for _ in range(A__ )] UpperCAmelCase_ : str = time.time() max_subarray(A__ ,0 ,input_size - 1 ) UpperCAmelCase_ : int = time.time() return end - start def snake_case ( ): UpperCAmelCase_ : int = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00] UpperCAmelCase_ : List[str] = [time_max_subarray(A__ ) for input_size in input_sizes] print("No of Inputs\t\tTime Taken" ) for input_size, runtime in zip(A__ ,A__ ): print(A__ ,"\t\t" ,A__ ) plt.plot(A__ ,A__ ) plt.xlabel("Number of Inputs" ) plt.ylabel("Time taken in seconds" ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
268
1
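The second snippet in the row above solves maximum subarray by divide and conquer. A condensed sketch of the same O(n log n) recursion, returning only the best sum; the names and sample array are illustrative.

def max_subarray_sum(arr, low, high):
    # Divide-and-conquer maximum subarray sum.
    if low == high:
        return arr[low]
    mid = (low + high) // 2
    # Best suffix of the left half and best prefix of the right half form the crossing sum.
    best_left, total = float("-inf"), 0
    for i in range(mid, low - 1, -1):
        total += arr[i]
        best_left = max(best_left, total)
    best_right, total = float("-inf"), 0
    for i in range(mid + 1, high + 1):
        total += arr[i]
        best_right = max(best_right, total)
    cross = best_left + best_right
    return max(max_subarray_sum(arr, low, mid), max_subarray_sum(arr, mid + 1, high), cross)


data = [13, -3, -25, 20, -3, -16, -23, 18, 20, -7, 12, -5, -22, 15, -4, 7]
print(max_subarray_sum(data, 0, len(data) - 1))  # 43 for this classic example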
"""simple docstring""" def snake_case ( A__ ,A__ ): if discount_rate < 0: raise ValueError("Discount rate cannot be negative" ) if not cash_flows: raise ValueError("Cash flows list cannot be empty" ) UpperCAmelCase_ : Any = sum( cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(A__ ) ) return round(A__ ,ndigits=2 ) if __name__ == "__main__": import doctest doctest.testmod()
268
"""simple docstring""" from __future__ import annotations import time lowerCamelCase_ = list[tuple[int, int]] lowerCamelCase_ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class UpperCamelCase_ : def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None ) -> Dict: UpperCAmelCase_ : Any = pos_x UpperCAmelCase_ : str = pos_y UpperCAmelCase_ : int = (pos_y, pos_x) UpperCAmelCase_ : int = goal_x UpperCAmelCase_ : Tuple = goal_y UpperCAmelCase_ : Union[str, Any] = parent class UpperCamelCase_ : def __init__( self : List[Any] , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Tuple: UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [self.start] UpperCAmelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Path | None: while self.node_queue: UpperCAmelCase_ : str = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: UpperCAmelCase_ : Optional[Any] = True return self.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_successors(lowerCAmelCase_ ) for node in successors: self.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> list[Node]: UpperCAmelCase_ : List[str] = [] for action in delta: UpperCAmelCase_ : List[Any] = parent.pos_x + action[1] UpperCAmelCase_ : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , lowerCAmelCase_ ) ) return successors def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node | None ) -> Path: UpperCAmelCase_ : Union[str, Any] = node UpperCAmelCase_ : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Tuple = current_node.parent path.reverse() return path class UpperCamelCase_ : def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Path | None: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: UpperCAmelCase_ : int = self.fwd_bfs.node_queue.pop(0 ) UpperCAmelCase_ : Dict = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: UpperCAmelCase_ : str = True return self.retrace_bidirectional_path( lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = current_bwd_node UpperCAmelCase_ : List[str] = current_fwd_node UpperCAmelCase_ : Tuple = { self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase_ ), self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase_ ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: 
bfs.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Path: UpperCAmelCase_ : Optional[Any] = self.fwd_bfs.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.bwd_bfs.retrace_path(lowerCAmelCase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() lowerCamelCase_ = (0, 0) lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCamelCase_ = time.time() lowerCamelCase_ = BreadthFirstSearch(init, goal) lowerCamelCase_ = bfs.search() lowerCamelCase_ = time.time() - start_bfs_time print('''Unidirectional BFS computation time : ''', bfs_time) lowerCamelCase_ = time.time() lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal) lowerCamelCase_ = bd_bfs.search() lowerCamelCase_ = time.time() - start_bd_bfs_time print('''Bidirectional BFS computation time : ''', bd_bfs_time)
268
1
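# --- Editor's note (addition, not part of the dataset rows) ---------------------
# The BFS sample above drains its frontier with list.pop(0), which is O(n) per
# dequeue. A minimal sketch of the same unidirectional grid search using
# collections.deque (O(1) popleft) is shown below; the function name and the
# local grid/delta handling are illustrative assumptions, not taken from the
# original file.
from collections import deque


def bfs_shortest_path_sketch(grid, start, goal):
    delta = [(-1, 0), (0, -1), (1, 0), (0, 1)]  # up, left, down, right
    queue = deque([(start, [start])])  # (position, path taken so far)
    seen = {start}
    while queue:
        (y, x), path = queue.popleft()
        if (y, x) == goal:
            return path  # the first time the goal is dequeued, the path is shortest
        for dy, dx in delta:
            ny, nx = y + dy, x + dx
            if 0 <= ny < len(grid) and 0 <= nx < len(grid[0]) and grid[ny][nx] == 0 and (ny, nx) not in seen:
                seen.add((ny, nx))
                queue.append(((ny, nx), path + [(ny, nx)]))
    return None  # goal unreachable


# On the 7x7 grid above, bfs_shortest_path_sketch(grid, (0, 0), (6, 6)) returns a
# 13-cell path (12 moves), e.g. straight along row 0 and then down column 6.
# --------------------------------------------------------------------------------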
"""simple docstring""" import os import random import sys from . import cryptomath_module as cryptomath from . import rabin_miller lowerCamelCase_ = 3 def snake_case ( A__ ): print("Generating primitive root of p" ) while True: UpperCAmelCase_ : Dict = random.randrange(3 ,A__ ) if pow(A__ ,2 ,A__ ) == 1: continue if pow(A__ ,A__ ,A__ ) == 1: continue return g def snake_case ( A__ ): print("Generating prime p..." ) UpperCAmelCase_ : str = rabin_miller.generate_large_prime(A__ ) # select large prime number. UpperCAmelCase_ : Union[str, Any] = primitive_root(A__ ) # one primitive root on modulo p. UpperCAmelCase_ : Tuple = random.randrange(3 ,A__ ) # private_key -> have to be greater than 2 for safety. UpperCAmelCase_ : Dict = cryptomath.find_mod_inverse(pow(A__ ,A__ ,A__ ) ,A__ ) UpperCAmelCase_ : Dict = (key_size, e_a, e_a, p) UpperCAmelCase_ : Dict = (key_size, d) return public_key, private_key def snake_case ( A__ ,A__ ): if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ): print("\nWARNING:" ) print( F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n""" "Use a different name or delete these files and re-run this program." ) sys.exit() UpperCAmelCase_ , UpperCAmelCase_ : Any = generate_key(A__ ) print(F"""\nWriting public key to file {name}_pubkey.txt...""" ) with open(F"""{name}_pubkey.txt""" ,"w" ) as fo: fo.write(F"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""" ) print(F"""Writing private key to file {name}_privkey.txt...""" ) with open(F"""{name}_privkey.txt""" ,"w" ) as fo: fo.write(F"""{private_key[0]},{private_key[1]}""" ) def snake_case ( ): print("Making key files..." ) make_key_files("elgamal" ,20_48 ) print("Key files generation successful" ) if __name__ == "__main__": main()
268
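# --- Editor's note (addition, not part of the dataset rows) ---------------------
# In the ElGamal generator above, the second public component is the modular
# inverse of pow(e_1, d, p), so (e_2 * pow(e_1, d, p)) % p == 1 must hold. A
# minimal self-contained check with a toy prime is sketched below; the names are
# illustrative, and pow(x, -1, p) (Python 3.8+) stands in for
# cryptomath.find_mod_inverse.
import random

p = 1_000_003                      # a toy prime, far too small for real use
e_1 = 2                            # illustrative base; a real key uses a primitive root
d = random.randrange(3, p)         # private exponent
e_2 = pow(pow(e_1, d, p), -1, p)   # inverse of e_1^d mod p, as in generate_key
assert (e_2 * pow(e_1, d, p)) % p == 1
# --------------------------------------------------------------------------------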
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCamelCase_ = None lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } lowerCamelCase_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class UpperCamelCase_ (__A ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = MBartTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Any , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( vocab_file=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Tuple = vocab_file UpperCAmelCase_ : str = False if not self.vocab_file else True UpperCAmelCase_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) UpperCAmelCase_ : Tuple = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase_ : int = src_lang if src_lang is not None else "en_XX" UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase_ : Any = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : List[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : List[str] = [self.sep_token_id] UpperCAmelCase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCAmelCase_ : str = src_lang UpperCAmelCase_ : str = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Dict , ) -> BatchEncoding: UpperCAmelCase_ : List[Any] = src_lang UpperCAmelCase_ : Tuple = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> None: UpperCAmelCase_ : int = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : str = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , 
) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return UpperCAmelCase_ : List[str] = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
268
1
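# --- Editor's note (addition, not part of the dataset rows) ---------------------
# Both set_*_lang_special_tokens methods in the tokenizer above configure an empty
# prefix and an [eos, language-code] suffix, so every sequence is laid out as
# "X [eos] [lang_code]". A minimal pure-Python sketch of that post-processing;
# the token ids below are illustrative placeholders, not the real mBART vocabulary.
EOS_ID = 2
LANG_CODE_ID = {"en_XX": 250_004, "ro_RO": 250_020}  # illustrative ids only


def add_mbart_special_tokens_sketch(token_ids, lang="en_XX"):
    prefix = []                           # mBART uses no prefix tokens
    suffix = [EOS_ID, LANG_CODE_ID[lang]]
    return prefix + list(token_ids) + suffix


# add_mbart_special_tokens_sketch([17, 42]) -> [17, 42, 2, 250004]
# --------------------------------------------------------------------------------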
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class UpperCamelCase_ : def __init__( self : str , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any=13 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Any=24 , lowerCAmelCase_ : Tuple=16 , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Optional[int]=True , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : Union[str, Any]=5 , lowerCAmelCase_ : Optional[Any]=4 , lowerCAmelCase_ : int=37 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=2 , ) -> Dict: UpperCAmelCase_ : str = parent UpperCAmelCase_ : Tuple = batch_size UpperCAmelCase_ : List[Any] = patch_size UpperCAmelCase_ : Dict = max_length UpperCAmelCase_ : str = num_mel_bins UpperCAmelCase_ : Tuple = is_training UpperCAmelCase_ : Optional[Any] = use_labels UpperCAmelCase_ : List[Any] = hidden_size UpperCAmelCase_ : Union[str, Any] = num_hidden_layers UpperCAmelCase_ : Union[str, Any] = num_attention_heads UpperCAmelCase_ : Tuple = intermediate_size UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : str = hidden_dropout_prob UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob UpperCAmelCase_ : str = type_sequence_label_size UpperCAmelCase_ : Dict = initializer_range UpperCAmelCase_ : int = scope UpperCAmelCase_ : Dict = frequency_stride UpperCAmelCase_ : Dict = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCAmelCase_ : List[str] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 UpperCAmelCase_ : Optional[Any] = (self.max_length - self.patch_size) // self.time_stride + 1 UpperCAmelCase_ : str = frequency_out_dimension * time_out_dimension UpperCAmelCase_ : Any = num_patches + 2 def _SCREAMING_SNAKE_CASE ( self : str ) -> int: UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : List[str] = self.get_config() return config, input_values, labels def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any: return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> Optional[int]: UpperCAmelCase_ : Union[str, Any] = ASTModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Dict = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase_ : int = {"input_values": input_values} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) __magic_name__ = ( {'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel} if is_torch_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str ) -> List[str]: if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: UpperCAmelCase_ : Optional[Any] = ASTModelTester(self ) UpperCAmelCase_ : Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason="AST does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Tuple = model_class(lowerCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase_ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : str = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : Optional[int] = ["input_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int: for model_name in 
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : int = ASTModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : List[Any] = hf_hub_download( repo_id="nielsr/audio-spectogram-transformer-checkpoint" ,filename="sample_audio.flac" ,repo_type="dataset" ) UpperCAmelCase_ , UpperCAmelCase_ : Tuple = torchaudio.load(A__ ) return audio, sampling_rate @require_torch @require_torchaudio class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple: return ( ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ) if is_torchaudio_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]: UpperCAmelCase_ : List[Any] = self.default_feature_extractor UpperCAmelCase_ : Dict = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self.default_feature_extractor UpperCAmelCase_ , UpperCAmelCase_ : List[str] = prepare_audio() UpperCAmelCase_ : Any = audio.squeeze().numpy() UpperCAmelCase_ : int = feature_extractor(lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ ) # forward pass with torch.no_grad(): UpperCAmelCase_ : Union[str, Any] = model(**lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : Optional[Any] = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : str = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
268
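# --- Editor's note (addition, not part of the dataset rows) ---------------------
# The model tester above derives the AST sequence length from the spectrogram
# patching arithmetic. The same computation, written out with the tester's default
# values (the function name is an illustrative assumption):
def ast_seq_length_sketch(num_mel_bins=16, max_length=24, patch_size=2, frequency_stride=2, time_stride=2):
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1
    time_out = (max_length - patch_size) // time_stride + 1
    return frequency_out * time_out + 2  # +2 for the [CLS] and distillation tokens


# With the defaults: (16 - 2) // 2 + 1 = 8 frequency patches and
# (24 - 2) // 2 + 1 = 12 time patches, so 8 * 12 + 2 = 98 tokens.
# --------------------------------------------------------------------------------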
"""simple docstring""" from torch import nn def snake_case ( A__ ): if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F"""Unsupported activation function: {act_fn}""" )
268
1
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
268
"""simple docstring""" import copy import os import cva import numpy as np from matplotlib import pyplot as plt class UpperCamelCase_ : def __init__( self : str ) -> Dict: UpperCAmelCase_ : List[Any] = "" UpperCAmelCase_ : int = "" UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : int = 0 UpperCAmelCase_ : List[Any] = 256 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : List[Any] = 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : List[str] = 0 def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Dict ) -> Optional[Any]: UpperCAmelCase_ : Dict = cva.imread(lowerCAmelCase_ , 0 ) UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.img ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = plt.hist(self.img.ravel() , 256 , [0, 256] , label="x" ) UpperCAmelCase_ : List[Any] = np.sum(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) ): UpperCAmelCase_ : List[Any] = x[i] / self.k self.sk += prk UpperCAmelCase_ : Optional[Any] = (self.L - 1) * self.sk if self.rem != 0: UpperCAmelCase_ : Any = int(last % last ) UpperCAmelCase_ : List[str] = int(last + 1 if self.rem >= 0.5 else last ) self.last_list.append(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = int(np.ma.count(self.img ) / self.img[1].size ) UpperCAmelCase_ : Dict = self.img[1].size for i in range(self.number_of_cols ): for j in range(self.number_of_rows ): UpperCAmelCase_ : Any = self.img[j][i] if num != self.last_list[num]: UpperCAmelCase_ : Tuple = self.last_list[num] cva.imwrite("output_data/output.jpg" , self.img ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: plt.hist(self.img.ravel() , 256 , [0, 256] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: cva.imshow("Output-Image" , self.img ) cva.imshow("Input-Image" , self.original_image ) cva.waitKey(5_000 ) cva.destroyAllWindows() if __name__ == "__main__": lowerCamelCase_ = os.path.join(os.path.basename(__file__), '''image_data/input.jpg''') lowerCamelCase_ = ConstantStretch() stretcher.stretch(file_path) stretcher.plot_histogram() stretcher.show_image()
268
1
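# --- Editor's note (addition, not part of the dataset rows) ---------------------
# The class above implements histogram equalization: each grey level is remapped
# through the scaled cumulative distribution s_k = (L - 1) * CDF(k). A minimal
# vectorized numpy sketch of the same mapping (function name and rounding choice
# are illustrative assumptions):
import numpy as np


def equalize_histogram_sketch(img: np.ndarray, levels: int = 256) -> np.ndarray:
    hist = np.bincount(img.ravel(), minlength=levels)    # pixel count per grey level
    cdf = np.cumsum(hist) / img.size                     # cumulative distribution in [0, 1]
    lut = np.rint((levels - 1) * cdf).astype(img.dtype)  # s_k = (L - 1) * CDF(k)
    return lut[img]                                      # remap every pixel through the LUT
# --------------------------------------------------------------------------------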
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() lowerCamelCase_ = { '''bart''': ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''bert''': ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-large-uncased-whole-word-masking-finetuned-squad''': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-large-cased-whole-word-masking-finetuned-squad''': ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''bert-base-cased-finetuned-mrpc''': ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''dpr''': ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''gpt2''': ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlnet''': ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlm''': ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''xlm-roberta''': ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''transfo-xl''': ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''openai-gpt''': ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''roberta''': ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''layoutlm''': ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), '''roberta-large-mnli''': ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''camembert''': ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''flaubert''': ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''distilbert''': ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''distilbert-base-distilled-squad''': ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''lxmert''': ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''lxmert-visual-feature-encoder''': ( LxmertConfig, 
TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''ctrl''': ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''albert''': ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''t5''': ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''electra''': ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), '''wav2vec2''': ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def snake_case ( A__ ,A__ ,A__ ,A__ ,A__=False ,A__=True ): if model_type not in MODEL_CLASSES: raise ValueError(F"""Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.""" ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : int = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: UpperCAmelCase_ : Tuple = cached_file(A__ ,A__ ,force_download=not use_cached_models ) UpperCAmelCase_ : Dict = config_class.from_json_file(A__ ) UpperCAmelCase_ : int = True UpperCAmelCase_ : Union[str, Any] = True print(F"""Building TensorFlow model from configuration: {config}""" ) UpperCAmelCase_ : Optional[int] = model_class(A__ ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): UpperCAmelCase_ : Optional[Any] = cached_file( A__ ,A__ ,force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: UpperCAmelCase_ : Optional[Any] = load_pytorch_checkpoint_in_tfa_model(A__ ,A__ ) if compare_with_pt_model: UpperCAmelCase_ : Optional[int] = tf_model(tf_model.dummy_inputs ,training=A__ ) # build the network UpperCAmelCase_ : Tuple = torch.load(A__ ,map_location="cpu" ) UpperCAmelCase_ : str = pt_model_class.from_pretrained( pretrained_model_name_or_path=A__ ,config=A__ ,state_dict=A__ ) with torch.no_grad(): UpperCAmelCase_ : Any = pt_model(**pt_model.dummy_inputs ) UpperCAmelCase_ : Tuple = pto[0].numpy() UpperCAmelCase_ : Tuple = tfo[0].numpy() UpperCAmelCase_ : Optional[int] = np.amax(np.abs(np_pt - np_tf ) ) print(F"""Max absolute difference between models outputs {diff}""" ) assert diff <= 2e-2, F"""Error, model absolute difference is >2e-2: {diff}""" # Save pytorch-model print(F"""Save TensorFlow model to {tf_dump_path}""" ) tf_model.save_weights(A__ ,save_format="h5" ) def snake_case ( A__ ,A__ ,A__=None ,A__=None ,A__=False ,A__=False ,A__=False ,A__=False ,): if args_model_type is None: UpperCAmelCase_ : List[str] = list(MODEL_CLASSES.keys() ) else: UpperCAmelCase_ : int = [args_model_type] for j, model_type in enumerate(A__ ,start=1 ): print("=" * 1_00 ) print(F""" Converting model type {j}/{len(A__ )}: {model_type}""" ) print("=" * 1_00 ) if model_type not in MODEL_CLASSES: raise ValueError(F"""Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.""" ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: UpperCAmelCase_ : Optional[int] = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: UpperCAmelCase_ : Dict = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(A__ ,A__ ) ,start=1 ): print("-" * 1_00 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not 
only_convert_finetuned_models: print(F""" Skipping finetuned checkpoint {model_shortcut_name}""" ) continue UpperCAmelCase_ : Tuple = model_shortcut_name elif only_convert_finetuned_models: print(F""" Skipping not finetuned checkpoint {model_shortcut_name}""" ) continue print( F""" Converting checkpoint {i}/{len(A__ )}: {model_shortcut_name} - model_type {model_type}""" ) print("-" * 1_00 ) if config_shortcut_name in aws_config_map: UpperCAmelCase_ : int = cached_file(A__ ,A__ ,force_download=not use_cached_models ) else: UpperCAmelCase_ : Any = config_shortcut_name if model_shortcut_name in aws_model_maps: UpperCAmelCase_ : Union[str, Any] = cached_file(A__ ,A__ ,force_download=not use_cached_models ) else: UpperCAmelCase_ : Tuple = model_shortcut_name if os.path.isfile(A__ ): UpperCAmelCase_ : List[str] = "converted_model" convert_pt_checkpoint_to_tf( model_type=A__ ,pytorch_checkpoint_path=A__ ,config_file=A__ ,tf_dump_path=os.path.join(A__ ,model_shortcut_name + "-tf_model.h5" ) ,compare_with_pt_model=A__ ,) if remove_cached_files: os.remove(A__ ) os.remove(A__ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.''' ) parser.add_argument( '''--model_type''', default=None, type=str, help=( f'Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and ' '''convert all the models from AWS.''' ), ) parser.add_argument( '''--pytorch_checkpoint_path''', default=None, type=str, help=( '''Path to the PyTorch checkpoint path or shortcut name to download from AWS. ''' '''If not given, will download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--config_file''', default=None, type=str, help=( '''The config json file corresponding to the pre-trained model. \n''' '''This specifies the model architecture. 
If not given and ''' '''--pytorch_checkpoint_path is not given or is a shortcut name ''' '''use the configuration associated to the shortcut name on the AWS''' ), ) parser.add_argument( '''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.''' ) parser.add_argument( '''--use_cached_models''', action='''store_true''', help='''Use cached models if possible instead of updating to latest checkpoint versions.''', ) parser.add_argument( '''--remove_cached_files''', action='''store_true''', help='''Remove pytorch models after conversion (save memory when converting in batches).''', ) parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''') lowerCamelCase_ = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
268
"""simple docstring""" from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase_ : def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : List[Any]=10 , lowerCAmelCase_ : Any=[10, 20, 30, 40] , lowerCAmelCase_ : Any=[1, 1, 2, 1] , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int="relu" , lowerCAmelCase_ : Tuple=3 , lowerCAmelCase_ : Optional[int]=None , ) -> str: UpperCAmelCase_ : Tuple = parent UpperCAmelCase_ : int = batch_size UpperCAmelCase_ : str = image_size UpperCAmelCase_ : List[Any] = num_channels UpperCAmelCase_ : Tuple = embeddings_size UpperCAmelCase_ : Union[str, Any] = hidden_sizes UpperCAmelCase_ : int = depths UpperCAmelCase_ : Optional[Any] = is_training UpperCAmelCase_ : Dict = use_labels UpperCAmelCase_ : str = hidden_act UpperCAmelCase_ : str = num_labels UpperCAmelCase_ : str = scope UpperCAmelCase_ : str = len(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : List[Any] = TFRegNetModel(config=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.num_labels UpperCAmelCase_ : List[Any] = TFRegNetForImageClassification(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: UpperCAmelCase_ : Any = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , 
UpperCAmelCase_ : Dict = config_and_inputs UpperCAmelCase_ : List[str] = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () __magic_name__ = ( {'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification} if is_tf_available() else {} ) __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = TFRegNetModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , ) @slow def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict: super().test_keras_fit() @unittest.skip(reason="RegNet does not support input and output embeddings" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: pass def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Dict = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase_ : List[Any] = [*signature.parameters.keys()] UpperCAmelCase_ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]: UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: def check_hidden_states_output(lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ): UpperCAmelCase_ : str = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : Any = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) UpperCAmelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase_ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_ : Tuple = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase_ : List[Any] = layer_type UpperCAmelCase_ : int = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase_ : Optional[int] = True check_hidden_states_output(lowerCAmelCase_ , 
lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any: UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]={} ): UpperCAmelCase_ : Tuple = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , return_dict=lowerCAmelCase_ , **lowerCAmelCase_ ).to_tuple() def recursive_check(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ): if isinstance(lowerCAmelCase_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(lowerCAmelCase_ , lowerCAmelCase_ ): recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(lowerCAmelCase_ , lowerCAmelCase_ ) ) , msg=( "Tuple and dict output are not equal. Difference:" f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(lowerCAmelCase_ , lowerCAmelCase_ ) for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) UpperCAmelCase_ : List[str] = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) check_equivalence(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , {"output_hidden_states": True} ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str: UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]: for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Any = TFRegNetModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def snake_case ( ): UpperCAmelCase_ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_tf @require_vision class UpperCamelCase_ (unittest.TestCase ): @cached_property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]: return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict: UpperCAmelCase_ : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) UpperCAmelCase_ : Union[str, Any] = self.default_image_processor UpperCAmelCase_ : int = prepare_img() 
UpperCAmelCase_ : List[Any] = image_processor(images=lowerCAmelCase_ , return_tensors="tf" ) # forward pass UpperCAmelCase_ : Tuple = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits UpperCAmelCase_ : List[str] = tf.TensorShape((1, 1_000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 )
268
1
"""simple docstring""" import argparse import torch from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml parser.add_argument( '''--original_config_file''', default=None, type=str, help='''The YAML config file corresponding to the original architecture.''', ) parser.add_argument( '''--num_in_channels''', default=None, type=int, help='''The number of input channels. If `None` number of input channels will be automatically inferred.''', ) parser.add_argument( '''--scheduler_type''', default='''pndm''', type=str, help='''Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']''', ) parser.add_argument( '''--pipeline_type''', default=None, type=str, help=( '''The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\'''' '''. If `None` pipeline will be automatically inferred.''' ), ) parser.add_argument( '''--image_size''', default=None, type=int, help=( '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2''' ''' Base. Use 768 for Stable Diffusion v2.''' ), ) parser.add_argument( '''--prediction_type''', default=None, type=str, help=( '''The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable''' ''' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.''' ), ) parser.add_argument( '''--extract_ema''', action='''store_true''', help=( '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights''' ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield''' ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.''' ), ) parser.add_argument( '''--upcast_attention''', action='''store_true''', help=( '''Whether the attention computation should always be upcasted. This is necessary when running stable''' ''' diffusion 2.1.''' ), ) parser.add_argument( '''--from_safetensors''', action='''store_true''', help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''', ) parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''') parser.add_argument( '''--stable_unclip''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.''', ) parser.add_argument( '''--stable_unclip_prior''', type=str, default=None, required=False, help='''Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.''', ) parser.add_argument( '''--clip_stats_path''', type=str, help='''Path to the clip stats file. 
Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.''', required=False, ) parser.add_argument( '''--controlnet''', action='''store_true''', default=None, help='''Set flag if this is a controlnet checkpoint.''' ) parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''') parser.add_argument( '''--vae_path''', type=str, default=None, required=False, help='''Set to a path, hub id to an already converted vae to not convert it again.''', ) lowerCamelCase_ = parser.parse_args() lowerCamelCase_ = download_from_original_stable_diffusion_ckpt( checkpoint_path=args.checkpoint_path, original_config_file=args.original_config_file, image_size=args.image_size, prediction_type=args.prediction_type, model_type=args.pipeline_type, extract_ema=args.extract_ema, scheduler_type=args.scheduler_type, num_in_channels=args.num_in_channels, upcast_attention=args.upcast_attention, from_safetensors=args.from_safetensors, device=args.device, stable_unclip=args.stable_unclip, stable_unclip_prior=args.stable_unclip_prior, clip_stats_path=args.clip_stats_path, controlnet=args.controlnet, vae_path=args.vae_path, ) if args.half: pipe.to(torch_dtype=torch.floataa) if args.controlnet: # only save the controlnet model pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) else: pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
268
"""simple docstring""" # Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version lowerCamelCase_ = get_logger(__name__) class UpperCamelCase_ : __magic_name__ = '''dummy_data''' __magic_name__ = '''datasets''' __magic_name__ = False def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Union[Version, str] , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[List[Callable]] = None , ) -> Tuple: UpperCAmelCase_ : Optional[int] = 0 UpperCAmelCase_ : int = dataset_name UpperCAmelCase_ : Optional[int] = cache_dir UpperCAmelCase_ : Tuple = use_local_dummy_data UpperCAmelCase_ : int = config # download_callbacks take a single url as input UpperCAmelCase_ : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root UpperCAmelCase_ : Optional[Any] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general UpperCAmelCase_ : Dict = str(lowerCAmelCase_ ) # to be downloaded UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : int = None @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: if self._dummy_file is None: UpperCAmelCase_ : List[str] = self.download_dummy_data() return self._dummy_file @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int: if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: UpperCAmelCase_ : int = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data ) UpperCAmelCase_ : Union[str, Any] = cached_path( lowerCAmelCase_ , cache_dir=self.cache_dir , extract_compressed_file=lowerCAmelCase_ , force_extract=lowerCAmelCase_ ) return os.path.join(lowerCAmelCase_ , self.dummy_file_name ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]: if self._bucket_url is None: UpperCAmelCase_ : Union[str, Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. 
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[str] , *lowerCAmelCase_ : List[Any] ) -> Optional[int]: if self.load_existing_dummy_data: # dummy data is downloaded and tested UpperCAmelCase_ : Dict = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned UpperCAmelCase_ : Optional[int] = self.dummy_file_name # special case when data_url is a dict if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return self.create_dummy_data_dict(lowerCAmelCase_ , lowerCAmelCase_ ) elif isinstance(lowerCAmelCase_ , (list, tuple) ): return self.create_dummy_data_list(lowerCAmelCase_ , lowerCAmelCase_ ) else: return self.create_dummy_data_single(lowerCAmelCase_ , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , *lowerCAmelCase_ : Union[str, Any] ) -> Any: return self.download_and_extract(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Any: return self.download_and_extract(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Union[str, Any] , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]: return path def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]: return {} def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Dict = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): for single_url in single_urls: download_callback(lowerCAmelCase_ ) else: UpperCAmelCase_ : Tuple = single_urls download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : List[str] = [os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) for x in single_urls] else: UpperCAmelCase_ : Optional[int] = single_urls UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(Path(lowerCAmelCase_ ).name ) ) UpperCAmelCase_ : int = value # make sure that values are unique if all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique UpperCAmelCase_ : List[str] = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] ) -> Dict: UpperCAmelCase_ : str = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one UpperCAmelCase_ : int = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , lowerCAmelCase_ ) ) for url in data_url ) UpperCAmelCase_ : Union[str, Any] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): UpperCAmelCase_ : Tuple = [data_url[0]] * len(lowerCAmelCase_ ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url 
has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase_ : Dict = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(lowerCAmelCase_ ) return dummy_data_list def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ) -> Optional[int]: for download_callback in self.download_callbacks: download_callback(lowerCAmelCase_ ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus UpperCAmelCase_ : Optional[Any] = os.path.join(lowerCAmelCase_ , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(lowerCAmelCase_ ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: pass def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]: def _iter_archive_members(lowerCAmelCase_ : Dict ): # this preserves the order of the members inside the ZIP archive UpperCAmelCase_ : str = Path(self.dummy_file ).parent UpperCAmelCase_ : Optional[Any] = path.relative_to(lowerCAmelCase_ ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: UpperCAmelCase_ : str = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = Path(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = _iter_archive_members(lowerCAmelCase_ ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(lowerCAmelCase_ ).as_posix(), file_path.open("rb" ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> str: if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : str = [paths] for path in paths: if os.path.isfile(lowerCAmelCase_ ): if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(lowerCAmelCase_ ): if os.path.basename(lowerCAmelCase_ ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(lowerCAmelCase_ ): if filename.startswith((".", "__") ): continue yield os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
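# --- Illustrative sketch (added; not part of the module above). The dummy-data
# manager builds local file names from URLs by keeping only the last path
# component and percent-encoding it with urllib.parse.quote_plus, so URLs that
# carry query arguments still yield valid file names. Standalone demo with a
# hypothetical URL:
import urllib.parse
from pathlib import Path

def dummy_name_for_url(url: str) -> str:
    # keep the final path segment, then encode characters such as '?' and '='
    return urllib.parse.quote_plus(Path(url).name)

print(dummy_name_for_url("https://example.com/data/train.csv?version=2"))
# -> train.csv%3Fversion%3D2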
268
1
"""simple docstring""" import argparse import collections import os import re import tempfile import pandas as pd from datasets import Dataset from huggingface_hub import hf_hub_download, upload_folder from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/update_metadata.py lowerCamelCase_ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. lowerCamelCase_ = direct_transformers_import(TRANSFORMERS_PATH) # Regexes that match TF/Flax/PT model names. lowerCamelCase_ = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') lowerCamelCase_ = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes. lowerCamelCase_ = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''') # Fill this with tuples (pipeline_tag, model_mapping, auto_model) lowerCamelCase_ = [ ('''pretraining''', '''MODEL_FOR_PRETRAINING_MAPPING_NAMES''', '''AutoModelForPreTraining'''), ('''feature-extraction''', '''MODEL_MAPPING_NAMES''', '''AutoModel'''), ('''audio-classification''', '''MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioClassification'''), ('''text-generation''', '''MODEL_FOR_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForCausalLM'''), ('''automatic-speech-recognition''', '''MODEL_FOR_CTC_MAPPING_NAMES''', '''AutoModelForCTC'''), ('''image-classification''', '''MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForImageClassification'''), ('''image-segmentation''', '''MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES''', '''AutoModelForImageSegmentation'''), ('''fill-mask''', '''MODEL_FOR_MASKED_LM_MAPPING_NAMES''', '''AutoModelForMaskedLM'''), ('''object-detection''', '''MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForObjectDetection'''), ( '''zero-shot-object-detection''', '''MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES''', '''AutoModelForZeroShotObjectDetection''', ), ('''question-answering''', '''MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForQuestionAnswering'''), ('''text2text-generation''', '''MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES''', '''AutoModelForSeq2SeqLM'''), ('''text-classification''', '''MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForSequenceClassification'''), ('''automatic-speech-recognition''', '''MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES''', '''AutoModelForSpeechSeq2Seq'''), ( '''table-question-answering''', '''MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForTableQuestionAnswering''', ), ('''token-classification''', '''MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForTokenClassification'''), ('''multiple-choice''', '''MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES''', '''AutoModelForMultipleChoice'''), ( '''next-sentence-prediction''', '''MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES''', '''AutoModelForNextSentencePrediction''', ), ( '''audio-frame-classification''', '''MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForAudioFrameClassification''', ), ('''audio-xvector''', '''MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES''', '''AutoModelForAudioXVector'''), ( '''document-question-answering''', '''MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForDocumentQuestionAnswering''', ), ( '''visual-question-answering''', 
'''MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES''', '''AutoModelForVisualQuestionAnswering''', ), ('''image-to-text''', '''MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES''', '''AutoModelForVision2Seq'''), ( '''zero-shot-image-classification''', '''MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForZeroShotImageClassification''', ), ('''depth-estimation''', '''MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES''', '''AutoModelForDepthEstimation'''), ('''video-classification''', '''MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES''', '''AutoModelForVideoClassification'''), ('''mask-generation''', '''MODEL_FOR_MASK_GENERATION_MAPPING_NAMES''', '''AutoModelForMaskGeneration'''), ] def snake_case ( A__ ): UpperCAmelCase_ : Tuple = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" ,A__ ) return [m.group(0 ) for m in matches] def snake_case ( ): UpperCAmelCase_ : List[Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES UpperCAmelCase_ : str = { config.replace("Config" ,"" ): model_type for model_type, config in config_maping_names.items() } # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax. UpperCAmelCase_ : Optional[Any] = collections.defaultdict(A__ ) UpperCAmelCase_ : Optional[Any] = collections.defaultdict(A__ ) UpperCAmelCase_ : Optional[int] = collections.defaultdict(A__ ) # Let's lookup through all transformers object (once) and find if models are supported by a given backend. for attr_name in dir(A__ ): UpperCAmelCase_ : str = None if _re_tf_models.match(A__ ) is not None: UpperCAmelCase_ : Union[str, Any] = tf_models UpperCAmelCase_ : Optional[Any] = _re_tf_models.match(A__ ).groups()[0] elif _re_flax_models.match(A__ ) is not None: UpperCAmelCase_ : Union[str, Any] = flax_models UpperCAmelCase_ : Dict = _re_flax_models.match(A__ ).groups()[0] elif _re_pt_models.match(A__ ) is not None: UpperCAmelCase_ : Optional[int] = pt_models UpperCAmelCase_ : Dict = _re_pt_models.match(A__ ).groups()[0] if lookup_dict is not None: while len(A__ ) > 0: if attr_name in model_prefix_to_model_type: UpperCAmelCase_ : int = True break # Try again after removing the last word in the name UpperCAmelCase_ : int = "".join(camel_case_split(A__ )[:-1] ) UpperCAmelCase_ : Any = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) ) UpperCAmelCase_ : str = list(A__ ) all_models.sort() UpperCAmelCase_ : Any = {"model_type": all_models} UpperCAmelCase_ : Optional[Any] = [pt_models[t] for t in all_models] UpperCAmelCase_ : Optional[Any] = [tf_models[t] for t in all_models] UpperCAmelCase_ : str = [flax_models[t] for t in all_models] # Now let's use the auto-mapping names to make sure UpperCAmelCase_ : Optional[Any] = {} for t in all_models: if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES: UpperCAmelCase_ : Tuple = "AutoProcessor" elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES: UpperCAmelCase_ : Tuple = "AutoTokenizer" elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES: UpperCAmelCase_ : Tuple = "AutoFeatureExtractor" else: # Default to AutoTokenizer if a model has nothing, for backward compatibility. 
UpperCAmelCase_ : Optional[int] = "AutoTokenizer" UpperCAmelCase_ : Optional[int] = [processors[t] for t in all_models] return pd.DataFrame(A__ ) def snake_case ( A__ ): UpperCAmelCase_ : Optional[Any] = [ transformers_module.models.auto.modeling_auto, transformers_module.models.auto.modeling_tf_auto, transformers_module.models.auto.modeling_flax_auto, ] for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS: UpperCAmelCase_ : int = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""] UpperCAmelCase_ : Any = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""] # Loop through all three frameworks for module, cls, mapping in zip(A__ ,A__ ,A__ ): # The type of pipeline may not exist in this framework if not hasattr(A__ ,A__ ): continue # First extract all model_names UpperCAmelCase_ : Any = [] for name in getattr(A__ ,A__ ).values(): if isinstance(A__ ,A__ ): model_names.append(A__ ) else: model_names.extend(list(A__ ) ) # Add pipeline tag and auto model class for those models table.update({model_name: (pipeline_tag, cls) for model_name in model_names} ) return table def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Dict = get_frameworks_table() UpperCAmelCase_ : Optional[Any] = Dataset.from_pandas(A__ ) UpperCAmelCase_ : int = hf_hub_download( "huggingface/transformers-metadata" ,"pipeline_tags.json" ,repo_type="dataset" ,token=A__ ) UpperCAmelCase_ : Dict = Dataset.from_json(A__ ) UpperCAmelCase_ : int = { tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"]) for i in range(len(A__ ) ) } UpperCAmelCase_ : List[Any] = update_pipeline_and_auto_class_table(A__ ) # Sort the model classes to avoid some nondeterministic updates to create false update commits. UpperCAmelCase_ : Optional[int] = sorted(table.keys() ) UpperCAmelCase_ : Any = pd.DataFrame( { "model_class": model_classes, "pipeline_tag": [table[m][0] for m in model_classes], "auto_class": [table[m][1] for m in model_classes], } ) UpperCAmelCase_ : int = Dataset.from_pandas(A__ ) with tempfile.TemporaryDirectory() as tmp_dir: frameworks_dataset.to_json(os.path.join(A__ ,"frameworks.json" ) ) tags_dataset.to_json(os.path.join(A__ ,"pipeline_tags.json" ) ) if commit_sha is not None: UpperCAmelCase_ : Union[str, Any] = ( F"""Update with commit {commit_sha}\n\nSee: """ F"""https://github.com/huggingface/transformers/commit/{commit_sha}""" ) else: UpperCAmelCase_ : List[Any] = "Update" upload_folder( repo_id="huggingface/transformers-metadata" ,folder_path=A__ ,repo_type="dataset" ,token=A__ ,commit_message=A__ ,) def snake_case ( ): UpperCAmelCase_ : Union[str, Any] = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS} UpperCAmelCase_ : str = transformers_module.pipelines.SUPPORTED_TASKS UpperCAmelCase_ : Optional[int] = [] for key in pipeline_tasks: if key not in in_table: UpperCAmelCase_ : str = pipeline_tasks[key]["pt"] if isinstance(A__ ,(list, tuple) ): UpperCAmelCase_ : Dict = model[0] UpperCAmelCase_ : int = model.__name__ if model not in in_table.values(): missing.append(A__ ) if len(A__ ) > 0: UpperCAmelCase_ : Optional[Any] = ", ".join(A__ ) raise ValueError( "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside " F"""`utils/update_metadata.py`: {msg}. 
Please add them!""" ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument('''--token''', type=str, help='''The token to use to push to the transformers-metadata dataset.''') parser.add_argument('''--commit_sha''', type=str, help='''The sha of the commit going with this update.''') parser.add_argument('''--check-only''', action='''store_true''', help='''Activate to just check all pipelines are present.''') lowerCamelCase_ = parser.parse_args() if args.check_only: check_pipeline_tags() else: update_metadata(args.token, args.commit_sha)
268
"""simple docstring""" lowerCamelCase_ = [ (1000, '''M'''), (900, '''CM'''), (500, '''D'''), (400, '''CD'''), (100, '''C'''), (90, '''XC'''), (50, '''L'''), (40, '''XL'''), (10, '''X'''), (9, '''IX'''), (5, '''V'''), (4, '''IV'''), (1, '''I'''), ] def snake_case ( A__ ): UpperCAmelCase_ : List[str] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 1_00, "D": 5_00, "M": 10_00} UpperCAmelCase_ : Optional[Any] = 0 UpperCAmelCase_ : Tuple = 0 while place < len(A__ ): if (place + 1 < len(A__ )) and (vals[roman[place]] < vals[roman[place + 1]]): total += vals[roman[place + 1]] - vals[roman[place]] place += 2 else: total += vals[roman[place]] place += 1 return total def snake_case ( A__ ): UpperCAmelCase_ : Union[str, Any] = [] for arabic, roman in ROMAN: ((UpperCAmelCase_) , (UpperCAmelCase_)) : str = divmod(A__ ,A__ ) result.append(roman * factor ) if number == 0: break return "".join(A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
1
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ (__A ): def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="None" , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=None , ) -> Dict: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : int = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[Any] = num_choices UpperCAmelCase_ : List[str] = relative_attention UpperCAmelCase_ : List[Any] = position_biased_input UpperCAmelCase_ : Dict = pos_att_type UpperCAmelCase_ : Optional[Any] = scope def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Tuple = None if self.use_input_mask: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Tuple = 
ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = self.get_config() UpperCAmelCase_ : int = 300 return config def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int ) -> List[Any]: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = DebertaModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = DebertaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> str: UpperCAmelCase_ : Optional[int] = self.num_labels 
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[Any]: UpperCAmelCase_ : Dict = DebertaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Any = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = config_and_inputs UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = DebertaModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] 
) -> Union[str, Any]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ (unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: pass @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained("microsoft/deberta-base" ) UpperCAmelCase_ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) UpperCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. UpperCAmelCase_ : Tuple = torch.tensor( [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
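# --- Illustrative sketch (added; assumes `torch` and `transformers` are
# installed). It mirrors what the model tester above exercises: one forward
# pass through a deliberately tiny, randomly initialized DeBERTa.
import torch
from transformers import DebertaConfig, DebertaModel

tiny_config = DebertaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2,
                            num_attention_heads=4, intermediate_size=37)
tiny_model = DebertaModel(tiny_config).eval()
input_ids = torch.randint(0, tiny_config.vocab_size, (1, 7))
with torch.no_grad():
    last_hidden = tiny_model(input_ids).last_hidden_state
print(last_hidden.shape)  # torch.Size([1, 7, 32])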
268
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ (__A ): def __init__( self : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[Any]=13 , lowerCAmelCase_ : Tuple=7 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : str=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : List[str]=99 , lowerCAmelCase_ : int=32 , lowerCAmelCase_ : List[str]=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : str=37 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Dict=0.1 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : List[Any]=512 , lowerCAmelCase_ : Optional[int]=16 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Union[str, Any]="None" , lowerCAmelCase_ : List[Any]=3 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=None , ) -> Dict: UpperCAmelCase_ : Dict = parent UpperCAmelCase_ : Union[str, Any] = batch_size UpperCAmelCase_ : Optional[Any] = seq_length UpperCAmelCase_ : List[Any] = is_training UpperCAmelCase_ : Optional[int] = use_input_mask UpperCAmelCase_ : int = use_token_type_ids UpperCAmelCase_ : Any = use_labels UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Any = hidden_size UpperCAmelCase_ : Dict = num_hidden_layers UpperCAmelCase_ : List[Any] = num_attention_heads UpperCAmelCase_ : List[Any] = intermediate_size UpperCAmelCase_ : int = hidden_act UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase_ : Any = max_position_embeddings UpperCAmelCase_ : Union[str, Any] = type_vocab_size UpperCAmelCase_ : Union[str, Any] = type_sequence_label_size UpperCAmelCase_ : Tuple = initializer_range UpperCAmelCase_ : int = num_labels UpperCAmelCase_ : Optional[Any] = num_choices UpperCAmelCase_ : List[str] = relative_attention UpperCAmelCase_ : List[Any] = position_biased_input UpperCAmelCase_ : Dict = pos_att_type UpperCAmelCase_ : Optional[Any] = scope def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict: UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : Tuple = None if self.use_input_mask: UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCAmelCase_ : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ : Optional[Any] = None UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Union[str, Any] = None if self.use_labels: UpperCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Tuple = 
ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict: return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : List[str] = self.get_config() UpperCAmelCase_ : int = 300 return config def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int ) -> List[Any]: self.parent.assertListEqual(list(result.loss.size() ) , [] ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[Any] ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = DebertaModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[int] = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0] UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ) -> List[Any]: UpperCAmelCase_ : Union[str, Any] = DebertaForMaskedLM(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : List[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : Any = self.num_labels UpperCAmelCase_ : List[Any] = DebertaForSequenceClassification(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ) -> str: UpperCAmelCase_ : Optional[int] = self.num_labels 
UpperCAmelCase_ : Optional[int] = DebertaForTokenClassification(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str ) -> List[Any]: UpperCAmelCase_ : Dict = DebertaForQuestionAnswering(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() UpperCAmelCase_ : Any = model( lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: UpperCAmelCase_ : Union[str, Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Tuple = config_and_inputs UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCamelCase_ (__A , __A , unittest.TestCase ): __magic_name__ = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) __magic_name__ = ( { '''feature-extraction''': DebertaModel, '''fill-mask''': DebertaForMaskedLM, '''question-answering''': DebertaForQuestionAnswering, '''text-classification''': DebertaForSequenceClassification, '''token-classification''': DebertaForTokenClassification, '''zero-shot''': DebertaForSequenceClassification, } if is_torch_available() else {} ) __magic_name__ = True __magic_name__ = False __magic_name__ = False __magic_name__ = False __magic_name__ = False def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_ : int = DebertaModelTester(self ) UpperCAmelCase_ : Any = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]: self.config_tester.run_common_tests() def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]: UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]: UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] 
) -> Union[str, Any]: UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple: for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ (unittest.TestCase ): @unittest.skip(reason="Model not available yet" ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]: pass @slow def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]: UpperCAmelCase_ : Optional[int] = DebertaModel.from_pretrained("microsoft/deberta-base" ) UpperCAmelCase_ : List[Any] = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] ) UpperCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase_ : Optional[Any] = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0] # compare the actual values for a slice. UpperCAmelCase_ : Tuple = torch.tensor( [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
268
1
"""simple docstring""" from typing import Optional import torch import torch.utils.checkpoint from torch import Tensor, nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward from ...modeling_outputs import ( BaseModelOutputWithNoAttention, BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_regnet import RegNetConfig lowerCamelCase_ = logging.get_logger(__name__) # General docstring lowerCamelCase_ = '''RegNetConfig''' # Base docstring lowerCamelCase_ = '''facebook/regnet-y-040''' lowerCamelCase_ = [1, 1088, 7, 7] # Image classification docstring lowerCamelCase_ = '''facebook/regnet-y-040''' lowerCamelCase_ = '''tabby, tabby cat''' lowerCamelCase_ = [ '''facebook/regnet-y-040''', # See all regnet models at https://huggingface.co/models?filter=regnet ] class UpperCamelCase_ (nn.Module ): def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : Optional[str] = "relu" , ) -> Optional[int]: super().__init__() UpperCAmelCase_ : Union[str, Any] = nn.Convad( lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=lowerCAmelCase_ , stride=lowerCAmelCase_ , padding=kernel_size // 2 , groups=lowerCAmelCase_ , bias=lowerCAmelCase_ , ) UpperCAmelCase_ : int = nn.BatchNormad(lowerCAmelCase_ ) UpperCAmelCase_ : Any = ACTaFN[activation] if activation is not None else nn.Identity() def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : int ) -> int: UpperCAmelCase_ : List[str] = self.convolution(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.normalization(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ (nn.Module ): def __init__( self : int , lowerCAmelCase_ : RegNetConfig ) -> Optional[Any]: super().__init__() UpperCAmelCase_ : Optional[int] = RegNetConvLayer( config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act ) UpperCAmelCase_ : Tuple = config.num_channels def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Tuple ) -> Tuple: UpperCAmelCase_ : Union[str, Any] = pixel_values.shape[1] if num_channels != self.num_channels: raise ValueError( "Make sure that the channel dimension of the pixel values match with the one set in the configuration." 
) UpperCAmelCase_ : Optional[Any] = self.embedder(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ (nn.Module ): def __init__( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 2 ) -> List[Any]: super().__init__() UpperCAmelCase_ : Optional[int] = nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , stride=lowerCAmelCase_ , bias=lowerCAmelCase_ ) UpperCAmelCase_ : int = nn.BatchNormad(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Tensor ) -> Tensor: UpperCAmelCase_ : str = self.convolution(lowerCAmelCase_ ) UpperCAmelCase_ : int = self.normalization(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ (nn.Module ): def __init__( self : List[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : int ) -> str: super().__init__() UpperCAmelCase_ : str = nn.AdaptiveAvgPoolad((1, 1) ) UpperCAmelCase_ : int = nn.Sequential( nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 ) , nn.Sigmoid() , ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> Optional[Any]: # b c h w -> b c 1 1 UpperCAmelCase_ : List[Any] = self.pooler(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = self.attention(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = hidden_state * attention return hidden_state class UpperCamelCase_ (nn.Module ): def __init__( self : Any , lowerCAmelCase_ : RegNetConfig , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 1 ) -> Optional[Any]: super().__init__() UpperCAmelCase_ : List[str] = in_channels != out_channels or stride != 1 UpperCAmelCase_ : Union[str, Any] = max(1 , out_channels // config.groups_width ) UpperCAmelCase_ : List[Any] = ( RegNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase_ : Dict = nn.Sequential( RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , groups=lowerCAmelCase_ , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , ) UpperCAmelCase_ : List[Any] = ACTaFN[config.hidden_act] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Any ) -> List[Any]: UpperCAmelCase_ : str = hidden_state UpperCAmelCase_ : Union[str, Any] = self.layer(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.shortcut(lowerCAmelCase_ ) hidden_state += residual UpperCAmelCase_ : int = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ (nn.Module ): def __init__( self : Dict , lowerCAmelCase_ : RegNetConfig , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 1 ) -> Tuple: super().__init__() UpperCAmelCase_ : Optional[Any] = in_channels != out_channels or stride != 1 UpperCAmelCase_ : List[str] = max(1 , out_channels // config.groups_width ) UpperCAmelCase_ : Dict = ( RegNetShortCut(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ ) if should_apply_shortcut else nn.Identity() ) UpperCAmelCase_ : Optional[Any] = nn.Sequential( RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , groups=lowerCAmelCase_ , activation=config.hidden_act ) , RegNetSELayer(lowerCAmelCase_ , reduced_channels=int(round(in_channels / 4 ) ) ) 
, RegNetConvLayer(lowerCAmelCase_ , lowerCAmelCase_ , kernel_size=1 , activation=lowerCAmelCase_ ) , ) UpperCAmelCase_ : Tuple = ACTaFN[config.hidden_act] def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Union[str, Any] ) -> Dict: UpperCAmelCase_ : Dict = hidden_state UpperCAmelCase_ : Optional[int] = self.layer(lowerCAmelCase_ ) UpperCAmelCase_ : Any = self.shortcut(lowerCAmelCase_ ) hidden_state += residual UpperCAmelCase_ : str = self.activation(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ (nn.Module ): def __init__( self : List[Any] , lowerCAmelCase_ : RegNetConfig , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int = 2 , lowerCAmelCase_ : int = 2 , ) -> int: super().__init__() UpperCAmelCase_ : List[str] = RegNetXLayer if config.layer_type == "x" else RegNetYLayer UpperCAmelCase_ : int = nn.Sequential( # downsampling is done in the first layer with stride of 2 layer( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , stride=lowerCAmelCase_ , ) , *[layer(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for _ in range(depth - 1 )] , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int] ) -> Optional[int]: UpperCAmelCase_ : List[str] = self.layers(lowerCAmelCase_ ) return hidden_state class UpperCamelCase_ (nn.Module ): def __init__( self : List[str] , lowerCAmelCase_ : RegNetConfig ) -> List[Any]: super().__init__() UpperCAmelCase_ : List[str] = nn.ModuleList([] ) # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input self.stages.append( RegNetStage( lowerCAmelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) ) UpperCAmelCase_ : Optional[int] = zip(config.hidden_sizes , config.hidden_sizes[1:] ) for (in_channels, out_channels), depth in zip(lowerCAmelCase_ , config.depths[1:] ): self.stages.append(RegNetStage(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , depth=lowerCAmelCase_ ) ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : Tensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> BaseModelOutputWithNoAttention: UpperCAmelCase_ : Optional[int] = () if output_hidden_states else None for stage_module in self.stages: if output_hidden_states: UpperCAmelCase_ : Union[str, Any] = hidden_states + (hidden_state,) UpperCAmelCase_ : Tuple = stage_module(lowerCAmelCase_ ) if output_hidden_states: UpperCAmelCase_ : List[Any] = hidden_states + (hidden_state,) if not return_dict: return tuple(v for v in [hidden_state, hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=lowerCAmelCase_ , hidden_states=lowerCAmelCase_ ) class UpperCamelCase_ (__A ): __magic_name__ = RegNetConfig __magic_name__ = '''regnet''' __magic_name__ = '''pixel_values''' __magic_name__ = True def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> Dict: if isinstance(lowerCAmelCase_ , nn.Convad ): nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" ) elif isinstance(lowerCAmelCase_ , (nn.BatchNormad, nn.GroupNorm) ): nn.init.constant_(module.weight , 1 ) nn.init.constant_(module.bias , 0 ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str]=False ) -> Optional[Any]: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : List[str] = value lowerCamelCase_ = r''' This model is a PyTorch 
[torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`RegNetConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' lowerCamelCase_ = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConvNextImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. ''' @add_start_docstrings( '''The bare RegNet model outputting raw features without any specific head on top.''' , __A , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet class UpperCamelCase_ (__A ): def __init__( self : Union[str, Any] , lowerCAmelCase_ : List[str] ) -> Optional[int]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : Any = config UpperCAmelCase_ : Any = RegNetEmbeddings(lowerCAmelCase_ ) UpperCAmelCase_ : Any = RegNetEncoder(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Tensor , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention: UpperCAmelCase_ : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) UpperCAmelCase_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_ : Union[str, Any] = self.embedder(lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self.encoder( lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = encoder_outputs[0] UpperCAmelCase_ : Optional[int] = self.pooler(lowerCAmelCase_ ) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=lowerCAmelCase_ , pooler_output=lowerCAmelCase_ , hidden_states=encoder_outputs.hidden_states , ) @add_start_docstrings( ''' RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , __A , ) # Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet class UpperCamelCase_ (__A ): def __init__( self : Tuple , lowerCAmelCase_ : str ) -> Union[str, Any]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = config.num_labels UpperCAmelCase_ : Tuple = RegNetModel(lowerCAmelCase_ ) # classification head UpperCAmelCase_ : Optional[int] = nn.Sequential( nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , ) # initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(lowerCAmelCase_ ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[torch.LongTensor] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention: UpperCAmelCase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict UpperCAmelCase_ : Any = self.regnet(lowerCAmelCase_ , output_hidden_states=lowerCAmelCase_ , return_dict=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = outputs.pooler_output if return_dict else outputs[1] UpperCAmelCase_ : Optional[int] = self.classifier(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: UpperCAmelCase_ : Tuple = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): UpperCAmelCase_ : Dict = "single_label_classification" else: UpperCAmelCase_ : List[str] = "multi_label_classification" if self.config.problem_type == "regression": UpperCAmelCase_ : Union[str, Any] = MSELoss() if self.num_labels == 1: UpperCAmelCase_ : Union[str, Any] = loss_fct(logits.squeeze() , labels.squeeze() ) else: UpperCAmelCase_ : Any = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) elif self.config.problem_type == "single_label_classification": UpperCAmelCase_ : Dict = CrossEntropyLoss() UpperCAmelCase_ : Optional[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": UpperCAmelCase_ : Tuple = BCEWithLogitsLoss() UpperCAmelCase_ : List[str] = loss_fct(lowerCAmelCase_ , lowerCAmelCase_ ) if not return_dict: UpperCAmelCase_ : Optional[int] = (logits,) + outputs[2:] return (loss,) + output if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase_ , logits=lowerCAmelCase_ , hidden_states=outputs.hidden_states )
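# --- Illustrative sketch (added; assumes only `torch`). RegNetConvLayer above
# is the standard convolution -> batch norm -> activation block; an equivalent
# standalone block, using the same `padding = kernel_size // 2` rule, behaves
# as follows:
import torch
from torch import nn

block = nn.Sequential(
    nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=3 // 2, bias=False),
    nn.BatchNorm2d(32),
    nn.ReLU(),
)
x = torch.randn(1, 3, 224, 224)
print(block(x).shape)  # torch.Size([1, 32, 112, 112]) -- stride 2 halves H and W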
268
"""simple docstring""" import os def snake_case ( ): with open(os.path.dirname(A__ ) + "/grid.txt" ) as f: UpperCAmelCase_ : Any = [] # noqa: E741 for _ in range(20 ): l.append([int(A__ ) for x in f.readline().split()] ) UpperCAmelCase_ : Any = 0 # right for i in range(20 ): for j in range(17 ): UpperCAmelCase_ : Union[str, Any] = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: UpperCAmelCase_ : Any = temp # down for i in range(17 ): for j in range(20 ): UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: UpperCAmelCase_ : Tuple = temp # diagonal 1 for i in range(17 ): for j in range(17 ): UpperCAmelCase_ : str = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: UpperCAmelCase_ : List[str] = temp # diagonal 2 for i in range(17 ): for j in range(3 ,20 ): UpperCAmelCase_ : List[Any] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: UpperCAmelCase_ : List[str] = temp return maximum if __name__ == "__main__": print(solution())
268
1
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) class UpperCamelCase_ (__A ): __magic_name__ = ['''input_ids''', '''attention_mask'''] def __init__( self : List[Any] , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : Union[str, Any]="<unk>" , lowerCAmelCase_ : List[Any]="<pad>" , lowerCAmelCase_ : Optional[Any]=125 , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Any , ) -> None: # Add extra_ids to the special token list if extra_ids > 0 and additional_special_tokens is None: UpperCAmelCase_ : List[Any] = [f"""<extra_id_{i}>""" for i in range(lowerCAmelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens UpperCAmelCase_ : Dict = len(set(filter(lambda lowerCAmelCase_ : bool("extra_id" in str(lowerCAmelCase_ ) ) , lowerCAmelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are""" " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the" " extra_ids tokens" ) UpperCAmelCase_ : Tuple = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else pad_token UpperCAmelCase_ : str = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else eos_token UpperCAmelCase_ : List[str] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else unk_token super().__init__( eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , extra_ids=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Any = extra_ids UpperCAmelCase_ : int = 2**8 # utf is 8 bits # define special tokens dict UpperCAmelCase_ : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } UpperCAmelCase_ : Optional[int] = len(self.special_tokens_encoder ) UpperCAmelCase_ : str = len(lowerCAmelCase_ ) for i, token in enumerate(lowerCAmelCase_ ): UpperCAmelCase_ : Union[str, Any] = self.vocab_size + i - n UpperCAmelCase_ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]: return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None , lowerCAmelCase_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(lowerCAmelCase_ )) + [1] return ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1] def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[int] ) -> List[int]: if len(lowerCAmelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f"""This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated""" " eos tokens being added." 
) return token_ids else: return token_ids + [self.eos_token_id] def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : int = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : Dict = self._add_eos_if_not_present(lowerCAmelCase_ ) if token_ids_a is None: return token_ids_a else: UpperCAmelCase_ : List[str] = self._add_eos_if_not_present(lowerCAmelCase_ ) return token_ids_a + token_ids_a def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : str ) -> List[str]: UpperCAmelCase_ : Tuple = [chr(lowerCAmelCase_ ) for i in text.encode("utf-8" )] return tokens def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict ) -> int: if token in self.special_tokens_encoder: UpperCAmelCase_ : Optional[Any] = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: UpperCAmelCase_ : str = self.added_tokens_encoder[token] elif len(lowerCAmelCase_ ) != 1: UpperCAmelCase_ : int = self.unk_token_id else: UpperCAmelCase_ : List[Any] = ord(lowerCAmelCase_ ) + self._num_special_tokens return token_id def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[str] ) -> Tuple: if index in self.special_tokens_decoder: UpperCAmelCase_ : List[str] = self.special_tokens_decoder[index] else: UpperCAmelCase_ : Optional[Any] = chr(index - self._num_special_tokens ) return token def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Optional[int] ) -> Dict: UpperCAmelCase_ : Union[str, Any] = b"" for token in tokens: if token in self.special_tokens_decoder: UpperCAmelCase_ : Tuple = self.special_tokens_decoder[token].encode("utf-8" ) elif token in self.added_tokens_decoder: UpperCAmelCase_ : Optional[Any] = self.special_tokens_decoder[token].encode("utf-8" ) elif token in self.special_tokens_encoder: UpperCAmelCase_ : str = token.encode("utf-8" ) elif token in self.added_tokens_encoder: UpperCAmelCase_ : List[str] = token.encode("utf-8" ) else: UpperCAmelCase_ : Tuple = bytes([ord(lowerCAmelCase_ )] ) bstring += tok_string UpperCAmelCase_ : Optional[int] = bstring.decode("utf-8" , errors="ignore" ) return string def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: return ()
268
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor def snake_case ( A__ ): UpperCAmelCase_ : Dict = SwinConfig(image_size=1_92 ) if "base" in model_name: UpperCAmelCase_ : Any = 6 UpperCAmelCase_ : Optional[Any] = 1_28 UpperCAmelCase_ : Optional[int] = (2, 2, 18, 2) UpperCAmelCase_ : List[str] = (4, 8, 16, 32) elif "large" in model_name: UpperCAmelCase_ : Dict = 12 UpperCAmelCase_ : int = 1_92 UpperCAmelCase_ : List[Any] = (2, 2, 18, 2) UpperCAmelCase_ : int = (6, 12, 24, 48) else: raise ValueError("Model not supported, only supports base and large variants" ) UpperCAmelCase_ : str = window_size UpperCAmelCase_ : Any = embed_dim UpperCAmelCase_ : int = depths UpperCAmelCase_ : Any = num_heads return config def snake_case ( A__ ): if "encoder.mask_token" in name: UpperCAmelCase_ : str = name.replace("encoder.mask_token" ,"embeddings.mask_token" ) if "encoder.patch_embed.proj" in name: UpperCAmelCase_ : Optional[int] = name.replace("encoder.patch_embed.proj" ,"embeddings.patch_embeddings.projection" ) if "encoder.patch_embed.norm" in name: UpperCAmelCase_ : List[str] = name.replace("encoder.patch_embed.norm" ,"embeddings.norm" ) if "attn.proj" in name: UpperCAmelCase_ : Optional[Any] = name.replace("attn.proj" ,"attention.output.dense" ) if "attn" in name: UpperCAmelCase_ : Any = name.replace("attn" ,"attention.self" ) if "norm1" in name: UpperCAmelCase_ : str = name.replace("norm1" ,"layernorm_before" ) if "norm2" in name: UpperCAmelCase_ : Tuple = name.replace("norm2" ,"layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase_ : List[str] = name.replace("mlp.fc1" ,"intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase_ : str = name.replace("mlp.fc2" ,"output.dense" ) if name == "encoder.norm.weight": UpperCAmelCase_ : List[str] = "layernorm.weight" if name == "encoder.norm.bias": UpperCAmelCase_ : int = "layernorm.bias" if "decoder" in name: pass else: UpperCAmelCase_ : Any = "swin." + name return name def snake_case ( A__ ,A__ ): for key in orig_state_dict.copy().keys(): UpperCAmelCase_ : Tuple = orig_state_dict.pop(A__ ) if "attn_mask" in key: pass elif "qkv" in key: UpperCAmelCase_ : Optional[int] = key.split("." 
) UpperCAmelCase_ : str = int(key_split[2] ) UpperCAmelCase_ : Union[str, Any] = int(key_split[4] ) UpperCAmelCase_ : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase_ : List[Any] = val[:dim, :] UpperCAmelCase_ : str = val[ dim : dim * 2, : ] UpperCAmelCase_ : str = val[-dim:, :] else: UpperCAmelCase_ : List[str] = val[ :dim ] UpperCAmelCase_ : str = val[ dim : dim * 2 ] UpperCAmelCase_ : Optional[Any] = val[ -dim: ] else: UpperCAmelCase_ : Tuple = val return orig_state_dict def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ : List[Any] = torch.load(A__ ,map_location="cpu" )["model"] UpperCAmelCase_ : Optional[Any] = get_swin_config(A__ ) UpperCAmelCase_ : List[Any] = SwinForMaskedImageModeling(A__ ) model.eval() UpperCAmelCase_ : str = convert_state_dict(A__ ,A__ ) model.load_state_dict(A__ ) UpperCAmelCase_ : int = "http://images.cocodataset.org/val2017/000000039769.jpg" UpperCAmelCase_ : int = ViTImageProcessor(size={"height": 1_92, "width": 1_92} ) UpperCAmelCase_ : Any = Image.open(requests.get(A__ ,stream=A__ ).raw ) UpperCAmelCase_ : Any = image_processor(images=A__ ,return_tensors="pt" ) with torch.no_grad(): UpperCAmelCase_ : List[Any] = model(**A__ ).logits print(outputs.keys() ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(A__ ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(A__ ) if push_to_hub: print(F"""Pushing model and image processor for {model_name} to hub""" ) model.push_to_hub(F"""microsoft/{model_name}""" ) image_processor.push_to_hub(F"""microsoft/{model_name}""" ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''swin-base-simmim-window6-192''', type=str, choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''], help='''Name of the Swin SimMIM model you\'d like to convert.''', ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''', type=str, help='''Path to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCamelCase_ = parser.parse_args() convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
268
1
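The ByT5 tokenizer cell above boils down to a fixed byte-to-id offset: three special tokens occupy ids 0-2, and each UTF-8 byte maps to its value plus 3 (extra ids, if any, are appended after the byte range). The sketch below is my own minimal restatement of that scheme, not the Transformers API; the names `encode`, `decode`, and `SPECIALS` are mine.

```python
# Minimal restatement of the ByT5-style byte-level id scheme (my simplification,
# not the Transformers ByT5Tokenizer): 3 special ids, then the 256 byte values.
SPECIALS = {"<pad>": 0, "</s>": 1, "<unk>": 2}
NUM_SPECIALS = len(SPECIALS)


def encode(text: str) -> list[int]:
    # Each UTF-8 byte becomes (byte value + number of special tokens).
    return [b + NUM_SPECIALS for b in text.encode("utf-8")] + [SPECIALS["</s>"]]


def decode(ids: list[int]) -> str:
    # Drop special ids, shift back to raw bytes, decode leniently.
    data = bytes(i - NUM_SPECIALS for i in ids if i >= NUM_SPECIALS)
    return data.decode("utf-8", errors="ignore")


assert decode(encode("hé")) == "hé"
```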
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging lowerCamelCase_ = logging.get_logger(__name__) def snake_case ( ): # Get the sagemaker specific mp parameters from smp_options variable. UpperCAmelCase_ : Optional[Any] = os.getenv("SM_HP_MP_PARAMETERS" ,"{}" ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. UpperCAmelCase_ : int = json.loads(A__ ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. UpperCAmelCase_ : Union[str, Any] = os.getenv("SM_FRAMEWORK_PARAMS" ,"{}" ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". UpperCAmelCase_ : Dict = json.loads(A__ ) if not mpi_options.get("sagemaker_mpi_enabled" ,A__ ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed" ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class UpperCamelCase_ (__A ): __magic_name__ = field( default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict: super().__post_init__() warnings.warn( "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use " "`TrainingArguments` instead." , lowerCAmelCase_ , ) @cached_property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> "torch.device": logger.info("PyTorch: setting up devices" ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( "torch.distributed process group is initialized, but local_rank == -1. " "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" ) if self.no_cuda: UpperCAmelCase_ : str = torch.device("cpu" ) UpperCAmelCase_ : int = 0 elif is_sagemaker_model_parallel_available(): UpperCAmelCase_ : Optional[Any] = smp.local_rank() UpperCAmelCase_ : List[str] = torch.device("cuda" , lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta ) UpperCAmelCase_ : Optional[Any] = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) ) UpperCAmelCase_ : int = torch.device("cuda" , self.local_rank ) UpperCAmelCase_ : Optional[int] = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 UpperCAmelCase_ : Optional[Any] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. 
UpperCAmelCase_ : List[str] = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta ) UpperCAmelCase_ : int = torch.device("cuda" , self.local_rank ) UpperCAmelCase_ : List[str] = 1 if device.type == "cuda": torch.cuda.set_device(lowerCAmelCase_ ) return device @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]: return not is_sagemaker_model_parallel_available() @property def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: return False
268
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''', '''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''', '''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''', '''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''', '''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''', '''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''', '''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''', '''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''rwkv''' __magic_name__ = {'''max_position_embeddings''': '''context_length'''} def __init__( self : str , lowerCAmelCase_ : str=50_277 , lowerCAmelCase_ : Optional[int]=1_024 , lowerCAmelCase_ : Optional[int]=4_096 , lowerCAmelCase_ : Union[str, Any]=32 , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : List[Any]=1e-5 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : Tuple=0 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Any=True , **lowerCAmelCase_ : List[Any] , ) -> List[str]: UpperCAmelCase_ : Tuple = vocab_size UpperCAmelCase_ : List[str] = context_length UpperCAmelCase_ : Dict = hidden_size UpperCAmelCase_ : Optional[int] = num_hidden_layers UpperCAmelCase_ : Optional[int] = attention_hidden_size if attention_hidden_size is not None else hidden_size UpperCAmelCase_ : Dict = intermediate_size if intermediate_size is not None else 4 * hidden_size UpperCAmelCase_ : Any = layer_norm_epsilon UpperCAmelCase_ : List[Any] = rescale_every UpperCAmelCase_ : List[str] = use_cache UpperCAmelCase_ : List[str] = bos_token_id UpperCAmelCase_ : Union[str, Any] = eos_token_id super().__init__( tie_word_embeddings=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
268
1
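The RWKV config cell above derives two sizes when they are left unset: `attention_hidden_size` falls back to `hidden_size`, and `intermediate_size` falls back to `4 * hidden_size`. Below is a plain-dataclass sketch of just that defaulting rule; the field names come from the config, but the dataclass itself is my illustration, not the Transformers class.

```python
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class RwkvSizes:
    # Sketch of the size-defaulting logic only (not the real RwkvConfig).
    hidden_size: int = 4096
    attention_hidden_size: int | None = None
    intermediate_size: int | None = None

    def __post_init__(self) -> None:
        if self.attention_hidden_size is None:
            self.attention_hidden_size = self.hidden_size
        if self.intermediate_size is None:
            self.intermediate_size = 4 * self.hidden_size


print(RwkvSizes(hidden_size=1024))  # attention_hidden_size=1024, intermediate_size=4096
```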
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCamelCase_ = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCamelCase_ (__A ): __magic_name__ = ['''pixel_values'''] def __init__( self : Union[str, Any] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 255 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : bool = True , **lowerCAmelCase_ : Tuple , ) -> None: super().__init__(**lowerCAmelCase_ ) UpperCAmelCase_ : int = size if size is not None else {"shortest_edge": 224} UpperCAmelCase_ : Dict = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = crop_size if crop_size is not None else {"height": 224, "width": 224} UpperCAmelCase_ : Any = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ , param_name="crop_size" ) UpperCAmelCase_ : str = do_resize UpperCAmelCase_ : Any = size UpperCAmelCase_ : Optional[int] = resample UpperCAmelCase_ : List[str] = do_center_crop UpperCAmelCase_ : str = crop_size UpperCAmelCase_ : List[Any] = do_rescale UpperCAmelCase_ : Any = rescale_factor UpperCAmelCase_ : List[Any] = do_normalize UpperCAmelCase_ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN UpperCAmelCase_ : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD UpperCAmelCase_ : List[str] = do_convert_rgb def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Tuple , ) -> np.ndarray: UpperCAmelCase_ : Dict = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) UpperCAmelCase_ : Dict = get_resize_output_image_size(lowerCAmelCase_ , size=size["shortest_edge"] , default_to_square=lowerCAmelCase_ ) return resize(lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray: UpperCAmelCase_ : Optional[int] = get_size_dict(lowerCAmelCase_ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(lowerCAmelCase_ , size=(size["height"], size["width"]) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> int: return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray: return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : int = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : float = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : Optional[Union[float, List[float]]] = None , lowerCAmelCase_ : bool = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : List[str] , ) -> PIL.Image.Image: UpperCAmelCase_ : Any = do_resize if do_resize is not None else self.do_resize UpperCAmelCase_ : str = size if size is not None else self.size UpperCAmelCase_ : int = get_size_dict(lowerCAmelCase_ , param_name="size" , default_to_square=lowerCAmelCase_ ) UpperCAmelCase_ : int = resample if resample is not None else self.resample UpperCAmelCase_ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCAmelCase_ : Any = crop_size if crop_size is not None else self.crop_size UpperCAmelCase_ : Optional[Any] = get_size_dict(lowerCAmelCase_ , param_name="crop_size" , default_to_square=lowerCAmelCase_ ) UpperCAmelCase_ : Any = do_rescale if do_rescale is not None else self.do_rescale UpperCAmelCase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCAmelCase_ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize UpperCAmelCase_ : Dict = image_mean if image_mean is not None else self.image_mean UpperCAmelCase_ : int = image_std if image_std is not None else self.image_std UpperCAmelCase_ : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb UpperCAmelCase_ : Any = make_list_of_images(lowerCAmelCase_ ) if not valid_images(lowerCAmelCase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." 
) # PIL RGBA images are converted to RGB if do_convert_rgb: UpperCAmelCase_ : List[str] = [convert_to_rgb(lowerCAmelCase_ ) for image in images] # All transformations expect numpy arrays. UpperCAmelCase_ : str = [to_numpy_array(lowerCAmelCase_ ) for image in images] if do_resize: UpperCAmelCase_ : List[Any] = [self.resize(image=lowerCAmelCase_ , size=lowerCAmelCase_ , resample=lowerCAmelCase_ ) for image in images] if do_center_crop: UpperCAmelCase_ : Dict = [self.center_crop(image=lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images] if do_rescale: UpperCAmelCase_ : Optional[Any] = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images] if do_normalize: UpperCAmelCase_ : Dict = [self.normalize(image=lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ ) for image in images] UpperCAmelCase_ : Optional[int] = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images] UpperCAmelCase_ : str = {"pixel_values": images} return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
268
"""simple docstring""" import warnings from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401 warnings.warn( '''The `inpainting.py` script is outdated. Please use directly `from diffusers import''' ''' StableDiffusionInpaintPipeline` instead.''' )
268
1
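The image-processor cell above applies its steps in a fixed order: resize, center-crop, rescale by 1/255, normalize with the CLIP channel mean/std, then HWC-to-CHW transposition. A toy numpy sketch of the last three steps follows; the mean/std literals are the commonly published OpenAI CLIP values and should be treated as an assumption, and resize/crop are elided for brevity.

```python
import numpy as np

# Mean/std literals below are the commonly published OpenAI CLIP values
# (an assumption on my part); resize and center-crop are elided.
CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])


def preprocess(image_u8: np.ndarray) -> np.ndarray:
    pixels = image_u8.astype(np.float32) * (1 / 255)  # rescale
    pixels = (pixels - CLIP_MEAN) / CLIP_STD          # normalize per channel
    return np.transpose(pixels, (2, 0, 1))            # HWC -> CHW


print(preprocess(np.zeros((224, 224, 3), dtype=np.uint8)).shape)  # (3, 224, 224)
```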
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''', '''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''', '''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''', '''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''', '''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''', '''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''', '''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''', '''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''', '''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''', '''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''xlm''' __magic_name__ = { '''hidden_size''': '''emb_dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', '''n_words''': '''vocab_size''', # For backward compatibility } def __init__( self : str , lowerCAmelCase_ : Any=30_145 , lowerCAmelCase_ : str=2_048 , lowerCAmelCase_ : Any=12 , lowerCAmelCase_ : str=16 , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : int=0.1 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Any=False , lowerCAmelCase_ : Optional[int]=False , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Optional[int]=1 , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : int=512 , lowerCAmelCase_ : int=2_048**-0.5 , lowerCAmelCase_ : List[str]=1e-12 , lowerCAmelCase_ : List[str]=0.0_2 , lowerCAmelCase_ : str=0 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : str=2 , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : str=5 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[Any]="first" , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : int=True , lowerCAmelCase_ : Any=0.1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : str=5 , lowerCAmelCase_ : List[str]=0 , lowerCAmelCase_ : Dict=0 , lowerCAmelCase_ : List[Any]=2 , lowerCAmelCase_ : Any=0 , **lowerCAmelCase_ : str , ) -> Any: UpperCAmelCase_ : Optional[int] = vocab_size UpperCAmelCase_ : Dict = emb_dim UpperCAmelCase_ : int = n_layers UpperCAmelCase_ : int = n_heads UpperCAmelCase_ : Any = dropout UpperCAmelCase_ : List[Any] = attention_dropout UpperCAmelCase_ : List[Any] = gelu_activation UpperCAmelCase_ : List[str] = sinusoidal_embeddings UpperCAmelCase_ : Any = causal UpperCAmelCase_ : Dict = asm UpperCAmelCase_ : str = n_langs UpperCAmelCase_ : Any = use_lang_emb UpperCAmelCase_ : Optional[int] = layer_norm_eps UpperCAmelCase_ : str = bos_index UpperCAmelCase_ : List[str] = eos_index UpperCAmelCase_ : Dict = pad_index UpperCAmelCase_ : Tuple = unk_index UpperCAmelCase_ : str = mask_index UpperCAmelCase_ : Dict = is_encoder UpperCAmelCase_ : List[str] = max_position_embeddings UpperCAmelCase_ : List[str] = embed_init_std UpperCAmelCase_ : str = init_std UpperCAmelCase_ : Any = summary_type UpperCAmelCase_ : Any = summary_use_proj UpperCAmelCase_ : 
Optional[Any] = summary_activation UpperCAmelCase_ : Union[str, Any] = summary_proj_to_labels UpperCAmelCase_ : Tuple = summary_first_dropout UpperCAmelCase_ : Any = start_n_top UpperCAmelCase_ : Any = end_n_top UpperCAmelCase_ : int = mask_token_id UpperCAmelCase_ : Union[str, Any] = lang_id if "n_words" in kwargs: UpperCAmelCase_ : Union[str, Any] = kwargs["n_words"] super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ ) class UpperCamelCase_ (__A ): @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": UpperCAmelCase_ : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase_ : Tuple = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
268
"""simple docstring""" from __future__ import annotations class UpperCamelCase_ : def __init__( self : Any , lowerCAmelCase_ : int ) -> None: UpperCAmelCase_ : Any = data UpperCAmelCase_ : Node | None = None UpperCAmelCase_ : Node | None = None def snake_case ( A__ ): # In Order traversal of the tree if tree: display(tree.left ) print(tree.data ) display(tree.right ) def snake_case ( A__ ): return 1 + max(depth_of_tree(tree.left ) ,depth_of_tree(tree.right ) ) if tree else 0 def snake_case ( A__ ): if not tree: return True if tree.left and tree.right: return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right ) else: return not tree.left and not tree.right def snake_case ( ): # Main function for testing. UpperCAmelCase_ : List[str] = Node(1 ) UpperCAmelCase_ : Any = Node(2 ) UpperCAmelCase_ : Optional[Any] = Node(3 ) UpperCAmelCase_ : Union[str, Any] = Node(4 ) UpperCAmelCase_ : int = Node(5 ) UpperCAmelCase_ : Optional[int] = Node(6 ) UpperCAmelCase_ : Any = Node(7 ) UpperCAmelCase_ : List[str] = Node(8 ) UpperCAmelCase_ : List[Any] = Node(9 ) print(is_full_binary_tree(A__ ) ) print(depth_of_tree(A__ ) ) print("Tree is: " ) display(A__ ) if __name__ == "__main__": main()
268
1
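The style-context cell above includes a full-binary-tree check under obfuscated names. A readable restatement (the names `Node` and `is_full` are mine): a tree is full exactly when every node has either zero or two children.

```python
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def is_full(tree: Node | None) -> bool:
    # Empty trees count as full; otherwise a node needs 0 or 2 children.
    if tree is None:
        return True
    if tree.left and tree.right:
        return is_full(tree.left) and is_full(tree.right)
    return tree.left is None and tree.right is None


print(is_full(Node(1, Node(2), Node(3))))  # True
print(is_full(Node(1, Node(2))))           # False
```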
"""simple docstring""" def snake_case ( A__ = 10**9 ): UpperCAmelCase_ : Optional[Any] = 1 UpperCAmelCase_ : Optional[Any] = 2 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Optional[int] = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value UpperCAmelCase_ : int = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(f'{solution() = }')
268
"""simple docstring""" def snake_case ( A__ ,A__ ,A__ ,A__ ,A__ ): if index == number_of_items: return 0 UpperCAmelCase_ : str = 0 UpperCAmelCase_ : Dict = 0 UpperCAmelCase_ : Tuple = knapsack(A__ ,A__ ,A__ ,A__ ,index + 1 ) if weights[index] <= max_weight: UpperCAmelCase_ : Union[str, Any] = values[index] + knapsack( A__ ,A__ ,A__ ,max_weight - weights[index] ,index + 1 ) return max(A__ ,A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
1
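The recursive knapsack above recomputes overlapping subproblems. A memoized variant caches on (remaining capacity, item index) and keeps the same recurrence; this is my addition, not part of the original cell.

```python
from functools import lru_cache


def knapsack_memo(weights: list[int], values: list[int], max_weight: int) -> int:
    # Same skip/take recurrence as the recursive version above, cached.
    @lru_cache(maxsize=None)
    def best(capacity: int, index: int) -> int:
        if index == len(weights):
            return 0
        skip = best(capacity, index + 1)
        take = 0
        if weights[index] <= capacity:
            take = values[index] + best(capacity - weights[index], index + 1)
        return max(skip, take)

    return best(max_weight, 0)


print(knapsack_memo([1, 3, 4, 5], [1, 4, 5, 7], 7))  # 9 (items of weight 3 and 4)
```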
"""simple docstring""" import itertools import string from collections.abc import Generator, Iterable def snake_case ( A__ ,A__ ): UpperCAmelCase_ : int = iter(A__ ) while True: UpperCAmelCase_ : Union[str, Any] = tuple(itertools.islice(A__ ,A__ ) ) if not chunk: return yield chunk def snake_case ( A__ ): UpperCAmelCase_ : Union[str, Any] = "".join([c.upper() for c in dirty if c in string.ascii_letters] ) UpperCAmelCase_ : Tuple = "" if len(A__ ) < 2: return dirty for i in range(len(A__ ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(A__ ) & 1: clean += "X" return clean def snake_case ( A__ ): # I and J are used interchangeably to allow # us to use a 5x5 table (25 letters) UpperCAmelCase_ : Any = "ABCDEFGHIKLMNOPQRSTUVWXYZ" # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler UpperCAmelCase_ : Union[str, Any] = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(A__ ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(A__ ) return table def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Any = generate_table(A__ ) UpperCAmelCase_ : Dict = prepare_input(A__ ) UpperCAmelCase_ : Union[str, Any] = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(A__ ,2 ): UpperCAmelCase_ , UpperCAmelCase_ : Tuple = divmod(table.index(A__ ) ,5 ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = divmod(table.index(A__ ) ,5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def snake_case ( A__ ,A__ ): UpperCAmelCase_ : Any = generate_table(A__ ) UpperCAmelCase_ : List[str] = "" # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(A__ ,2 ): UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = divmod(table.index(A__ ) ,5 ) UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = divmod(table.index(A__ ) ,5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
268
"""simple docstring""" import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ (__A ): def __init__( self : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict=768 ) -> List[Any]: super().__init__(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = proj_size UpperCAmelCase_ : Optional[Any] = CLIPVisionModel(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = PaintByExampleMapper(lowerCAmelCase_ ) UpperCAmelCase_ : str = nn.LayerNorm(config.hidden_size ) UpperCAmelCase_ : List[Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling UpperCAmelCase_ : Optional[int] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Dict=False ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = self.model(pixel_values=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = clip_output.pooler_output UpperCAmelCase_ : List[Any] = self.mapper(latent_states[:, None] ) UpperCAmelCase_ : List[str] = self.final_layer_norm(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = self.proj_out(lowerCAmelCase_ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class UpperCamelCase_ (nn.Module ): def __init__( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Tuple: super().__init__() UpperCAmelCase_ : List[Any] = (config.num_hidden_layers + 1) // 5 UpperCAmelCase_ : Optional[Any] = config.hidden_size UpperCAmelCase_ : List[str] = 1 UpperCAmelCase_ : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , activation_fn="gelu" , attention_bias=lowerCAmelCase_ ) for _ in range(lowerCAmelCase_ ) ] ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : List[str] ) -> str: for block in self.blocks: UpperCAmelCase_ : int = block(lowerCAmelCase_ ) return hidden_states
268
1
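A round-trip usage sketch for the Playfair helpers above; the key and message are arbitrary examples of mine. Note that `prepare_input` upper-cases, drops non-letters, and pads to even length, so decoding returns the padded plaintext rather than the raw input.

```python
# Usage sketch for encode/decode above (key and message are examples).
key = "monarchy"
message = "hide the gold"

ct = encode(message, key)
pt = decode(ct, key)

print(ct)  # ciphertext letters only, with J folded into I by the 5x5 table
assert pt == "HIDETHEGOLDX"  # upper-cased, letters-only, padded to even length
```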
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def snake_case ( A__ ): return (data["data"], data["target"]) def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Optional[Any] = XGBRegressor(verbosity=0 ,random_state=42 ) xgb.fit(A__ ,A__ ) # Predict target for test data UpperCAmelCase_ : List[Any] = xgb.predict(A__ ) UpperCAmelCase_ : int = predictions.reshape(len(A__ ) ,1 ) return predictions def snake_case ( ): UpperCAmelCase_ : str = fetch_california_housing() UpperCAmelCase_ , UpperCAmelCase_ : Tuple = data_handling(A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = train_test_split( A__ ,A__ ,test_size=0.25 ,random_state=1 ) UpperCAmelCase_ : Dict = xgboost(A__ ,A__ ,A__ ) # Error printing print(F"""Mean Absolute Error : {mean_absolute_error(A__ ,A__ )}""" ) print(F"""Mean Square Error : {mean_squared_error(A__ ,A__ )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
268
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase_ = logging.get_logger(__name__) @add_end_docstrings(__A ) class UpperCamelCase_ (__A ): def __init__( self : int , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[str] ) -> Optional[Any]: super().__init__(*lowerCAmelCase_ , **lowerCAmelCase_ ) requires_backends(self , "vision" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[int]=None ) -> List[Any]: UpperCAmelCase_ : str = {} if top_k is not None: UpperCAmelCase_ : List[str] = top_k return {}, {}, postprocess_params def __call__( self : str , lowerCAmelCase_ : Union[str, List[str], "Image.Image", List["Image.Image"]] , **lowerCAmelCase_ : Any ) -> Tuple: return super().__call__(lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str ) -> Any: UpperCAmelCase_ : Tuple = load_image(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.image_processor(images=lowerCAmelCase_ , return_tensors=self.framework ) return model_inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict ) -> str: UpperCAmelCase_ : Any = self.model(**lowerCAmelCase_ ) return model_outputs def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int]=5 ) -> Any: if top_k > self.model.config.num_labels: UpperCAmelCase_ : int = self.model.config.num_labels if self.framework == "pt": UpperCAmelCase_ : str = model_outputs.logits.softmax(-1 )[0] UpperCAmelCase_ , UpperCAmelCase_ : Tuple = probs.topk(lowerCAmelCase_ ) elif self.framework == "tf": UpperCAmelCase_ : str = stable_softmax(model_outputs.logits , axis=-1 )[0] UpperCAmelCase_ : Union[str, Any] = tf.math.top_k(lowerCAmelCase_ , k=lowerCAmelCase_ ) UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f"""Unsupported framework: {self.framework}""" ) UpperCAmelCase_ : int = scores.tolist() UpperCAmelCase_ : Optional[Any] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowerCAmelCase_ , lowerCAmelCase_ )]
268
1
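The pipeline cell above postprocesses logits with a softmax followed by top-k selection. Below is a plain-numpy sketch of that step; the function name and the label list are placeholders of mine, not the Transformers implementation.

```python
import numpy as np


def top_k_labels(logits: np.ndarray, labels: list[str], k: int = 5) -> list[dict]:
    # Numerically stable softmax, then the k highest-probability labels.
    probs = np.exp(logits - logits.max())
    probs /= probs.sum()
    order = probs.argsort()[::-1][:k]
    return [{"score": float(probs[i]), "label": labels[i]} for i in order]


print(top_k_labels(np.array([2.0, 0.5, 1.0]), ["cat", "dog", "bird"], k=2))
```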
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''t5''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int: UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[Any] = d_model UpperCAmelCase_ : str = d_kv UpperCAmelCase_ : Any = d_ff UpperCAmelCase_ : int = num_layers UpperCAmelCase_ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : Optional[Any] = num_heads UpperCAmelCase_ : Any = relative_attention_num_buckets UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance UpperCAmelCase_ : Optional[Any] = dropout_rate UpperCAmelCase_ : Tuple = layer_norm_epsilon UpperCAmelCase_ : int = initializer_factor UpperCAmelCase_ : int = feed_forward_proj UpperCAmelCase_ : str = use_cache UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" ) UpperCAmelCase_ : List[Any] = act_info[-1] UpperCAmelCase_ : Optional[int] = act_info[0] == "gated" if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : int = "gelu_new" super().__init__( pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , ) class UpperCamelCase_ (__A ): @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : Any = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence" UpperCAmelCase_ : Union[str, Any] = {0: "batch"} UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" ) return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: return 13
268
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''', # See all DETR models at https://huggingface.co/models?filter=detr } class UpperCamelCase_ (__A ): __magic_name__ = '''detr''' __magic_name__ = ['''past_key_values'''] __magic_name__ = { '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self : Dict , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict=3 , lowerCAmelCase_ : List[Any]=100 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : int=8 , lowerCAmelCase_ : int=6 , lowerCAmelCase_ : Union[str, Any]=2_048 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Union[str, Any]=0.0 , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : List[Any]="relu" , lowerCAmelCase_ : List[Any]=256 , lowerCAmelCase_ : Tuple=0.1 , lowerCAmelCase_ : Optional[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0 , lowerCAmelCase_ : List[Any]=0.0_2 , lowerCAmelCase_ : Optional[int]=1.0 , lowerCAmelCase_ : str=False , lowerCAmelCase_ : Tuple="sine" , lowerCAmelCase_ : str="resnet50" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[Any]=1 , lowerCAmelCase_ : int=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : List[Any]=1 , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : Union[str, Any]=0.1 , **lowerCAmelCase_ : Dict , ) -> int: if backbone_config is not None and use_timm_backbone: raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." ) if not use_timm_backbone: if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." 
) UpperCAmelCase_ : Tuple = CONFIG_MAPPING["resnet"](out_features=["stage4"] ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): UpperCAmelCase_ : Dict = backbone_config.get("model_type" ) UpperCAmelCase_ : Dict = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase_ : Tuple = config_class.from_dict(lowerCAmelCase_ ) # set timm attributes to None UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = None, None, None UpperCAmelCase_ : str = use_timm_backbone UpperCAmelCase_ : Optional[Any] = backbone_config UpperCAmelCase_ : Tuple = num_channels UpperCAmelCase_ : Dict = num_queries UpperCAmelCase_ : str = d_model UpperCAmelCase_ : Any = encoder_ffn_dim UpperCAmelCase_ : Union[str, Any] = encoder_layers UpperCAmelCase_ : Optional[int] = encoder_attention_heads UpperCAmelCase_ : List[str] = decoder_ffn_dim UpperCAmelCase_ : Tuple = decoder_layers UpperCAmelCase_ : Optional[int] = decoder_attention_heads UpperCAmelCase_ : List[Any] = dropout UpperCAmelCase_ : Union[str, Any] = attention_dropout UpperCAmelCase_ : int = activation_dropout UpperCAmelCase_ : List[str] = activation_function UpperCAmelCase_ : Optional[int] = init_std UpperCAmelCase_ : Union[str, Any] = init_xavier_std UpperCAmelCase_ : List[str] = encoder_layerdrop UpperCAmelCase_ : Tuple = decoder_layerdrop UpperCAmelCase_ : str = encoder_layers UpperCAmelCase_ : Any = auxiliary_loss UpperCAmelCase_ : Optional[int] = position_embedding_type UpperCAmelCase_ : List[str] = backbone UpperCAmelCase_ : int = use_pretrained_backbone UpperCAmelCase_ : Any = dilation # Hungarian matcher UpperCAmelCase_ : str = class_cost UpperCAmelCase_ : Any = bbox_cost UpperCAmelCase_ : int = giou_cost # Loss coefficients UpperCAmelCase_ : List[str] = mask_loss_coefficient UpperCAmelCase_ : Dict = dice_loss_coefficient UpperCAmelCase_ : Any = bbox_loss_coefficient UpperCAmelCase_ : Union[str, Any] = giou_loss_coefficient UpperCAmelCase_ : int = eos_coefficient super().__init__(is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ ) @property def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int: return self.encoder_attention_heads @property def _SCREAMING_SNAKE_CASE ( self : int ) -> int: return self.d_model @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Tuple ) -> List[Any]: return cls(backbone_config=lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict[str, any]: UpperCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: UpperCAmelCase_ : Union[str, Any] = self.backbone_config.to_dict() UpperCAmelCase_ : Any = self.__class__.model_type return output class UpperCamelCase_ (__A ): __magic_name__ = version.parse('''1.11''' ) @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ("pixel_mask", {0: "batch"}), ] ) @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> float: return 1e-5 @property def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int: return 12
268
1
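The T5 config above encodes the feed-forward block as a string such as "relu" or "gated-gelu" and splits it into an activation name plus a gating flag, special-casing "gated-gelu" to "gelu_new" for backwards compatibility. A standalone sketch of that parsing (my restatement, not the Transformers code):

```python
def parse_ffn(feed_forward_proj: str) -> tuple[str, bool]:
    # Accepts "{ACT_FN}" or "gated-{ACT_FN}", mirroring the validation above.
    parts = feed_forward_proj.split("-")
    if len(parts) > 2 or (len(parts) == 2 and parts[0] != "gated"):
        raise ValueError(f"invalid feed_forward_proj: {feed_forward_proj!r}")
    act, gated = parts[-1], parts[0] == "gated"
    if feed_forward_proj == "gated-gelu":  # backwards compatibility
        act = "gelu_new"
    return act, gated


print(parse_ffn("relu"))        # ('relu', False)
print(parse_ffn("gated-gelu"))  # ('gelu_new', True)
```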
"""simple docstring""" import os def snake_case ( A__ = "matrix.txt" ): with open(os.path.join(os.path.dirname(A__ ) ,A__ ) ) as in_file: UpperCAmelCase_ : Optional[Any] = in_file.read() UpperCAmelCase_ : Optional[int] = [[int(A__ ) for cell in row.split("," )] for row in data.strip().splitlines()] UpperCAmelCase_ : List[str] = [[0 for cell in row] for row in grid] UpperCAmelCase_ : Optional[Any] = len(grid[0] ) UpperCAmelCase_ : List[Any] = [[0 for i in range(A__ )] for j in range(A__ )] UpperCAmelCase_ : List[Any] = grid[0][0] for i in range(1 ,A__ ): UpperCAmelCase_ : List[str] = grid[0][i] + dp[0][i - 1] for i in range(1 ,A__ ): UpperCAmelCase_ : Union[str, Any] = grid[i][0] + dp[i - 1][0] for i in range(1 ,A__ ): for j in range(1 ,A__ ): UpperCAmelCase_ : str = grid[i][j] + min(dp[i - 1][j] ,dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(f'{solution() = }')
268
"""simple docstring""" import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase_ (__A , unittest.TestCase ): __magic_name__ = BertTokenizer __magic_name__ = BertTokenizerFast __magic_name__ = True __magic_name__ = True __magic_name__ = filter_non_english def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]: super().setUp() UpperCAmelCase_ : Tuple = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] UpperCAmelCase_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> Union[str, Any]: UpperCAmelCase_ : Tuple = "UNwant\u00E9d,running" UpperCAmelCase_ : Any = "unwanted, running" return input_text, output_text def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: UpperCAmelCase_ : Any = self.tokenizer_class(self.vocab_file ) UpperCAmelCase_ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(lowerCAmelCase_ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [9, 6, 7, 12, 10, 11] ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any: if not self.test_rust_tokenizer: return UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer() UpperCAmelCase_ : List[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : int = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Dict = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = rust_tokenizer.encode(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) # With lower casing UpperCAmelCase_ : Tuple = self.get_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = "UNwant\u00E9d,running" UpperCAmelCase_ : List[Any] = tokenizer.tokenize(lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = rust_tokenizer.tokenize(lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase_ : Union[str, Any] = tokenizer.encode(lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = 
rust_tokenizer.encode(lowerCAmelCase_ )
        self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        UpperCAmelCase_ : Optional[Any] = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
        UpperCAmelCase_ : Optional[Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
        UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )

    def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
        UpperCAmelCase_ : Any = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
        UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
        UpperCAmelCase_ : List[str] = BasicTokenizer(do_lower_case=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
        UpperCAmelCase_ : Optional[int] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
        UpperCAmelCase_ : Union[str, Any] = BasicTokenizer(do_lower_case=lowerCAmelCase_ , strip_accents=lowerCAmelCase_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
        UpperCAmelCase_ : Tuple = BasicTokenizer(do_lower_case=lowerCAmelCase_ , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )

    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
        UpperCAmelCase_ : Tuple = BasicTokenizer()
        UpperCAmelCase_ : Dict = "a\n'll !!to?'d of, can't."
        UpperCAmelCase_ : List[str] = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(lowerCAmelCase_ ) , lowerCAmelCase_ )

    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
        UpperCAmelCase_ : int = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        UpperCAmelCase_ : Tuple = {}
        for i, token in enumerate(lowerCAmelCase_ ):
            UpperCAmelCase_ : Optional[int] = i
        UpperCAmelCase_ : Optional[Any] = WordpieceTokenizer(vocab=lowerCAmelCase_ , unk_token="[UNK]" )

        self.assertListEqual(tokenizer.tokenize("" ) , [] )

        self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )

        self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )

    def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
        self.assertTrue(_is_whitespace(" " ) )
        self.assertTrue(_is_whitespace("\t" ) )
        self.assertTrue(_is_whitespace("\r" ) )
        self.assertTrue(_is_whitespace("\n" ) )
        self.assertTrue(_is_whitespace("\u00A0" ) )

        self.assertFalse(_is_whitespace("A" ) )
        self.assertFalse(_is_whitespace("-" ) )

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
        self.assertTrue(_is_control("\u0005" ) )

        self.assertFalse(_is_control("A" ) )
        self.assertFalse(_is_control(" " ) )
        self.assertFalse(_is_control("\t" ) )
        self.assertFalse(_is_control("\r" ) )

    def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
        self.assertTrue(_is_punctuation("-" ) )
        self.assertTrue(_is_punctuation("$" ) )
        self.assertTrue(_is_punctuation("`" ) )
        self.assertTrue(_is_punctuation("." ) )

        self.assertFalse(_is_punctuation("A" ) )
        self.assertFalse(_is_punctuation(" " ) )

    def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
        UpperCAmelCase_ : Dict = self.get_tokenizer()
        UpperCAmelCase_ : List[str] = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )

        self.assertListEqual(
            [rust_tokenizer.tokenize(lowerCAmelCase_ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
        UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("bert-base-uncased" )

        UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase_ )
        UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase_ )

        UpperCAmelCase_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ )
        UpperCAmelCase_ : Dict = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase_ , lowerCAmelCase_ )

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]

    def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )

                UpperCAmelCase_ : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                UpperCAmelCase_ : Tuple = tokenizer_r.encode_plus(
                    lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , )

                UpperCAmelCase_ : Optional[int] = tokenizer_r.do_lower_case if hasattr(lowerCAmelCase_ , "do_lower_case" ) else False
                UpperCAmelCase_ : List[Any] = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
                self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )

    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
        UpperCAmelCase_ : List[Any] = ["的", "人", "有"]
        UpperCAmelCase_ : Tuple = "".join(lowerCAmelCase_ )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                UpperCAmelCase_ : Optional[Any] = True
                UpperCAmelCase_ : Any = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
                UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )

                UpperCAmelCase_ : Dict = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
                UpperCAmelCase_ : Dict = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )

                UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
                UpperCAmelCase_ : Any = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
                self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )

                UpperCAmelCase_ : List[Any] = False
                UpperCAmelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
                UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )

                UpperCAmelCase_ : int = tokenizer_r.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
                UpperCAmelCase_ : List[str] = tokenizer_p.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )

                UpperCAmelCase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(lowerCAmelCase_ )
                UpperCAmelCase_ : List[str] = tokenizer_p.convert_ids_to_tokens(lowerCAmelCase_ )

                # it is expected that only the first Chinese character is not preceded by "##".
                UpperCAmelCase_ : Tuple = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(lowerCAmelCase_ )
                ]
                self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
                self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
268
1
"""simple docstring""" from __future__ import annotations def snake_case ( A__ ): return len(set(A__ ) ) == len(A__ ) if __name__ == "__main__": import doctest doctest.testmod()
268
"""simple docstring""" from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class UpperCamelCase_ (__A ): __magic_name__ = '''t5''' __magic_name__ = ['''past_key_values'''] __magic_name__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , lowerCAmelCase_ : List[Any]=32_128 , lowerCAmelCase_ : Tuple=512 , lowerCAmelCase_ : Optional[int]=64 , lowerCAmelCase_ : List[str]=2_048 , lowerCAmelCase_ : Tuple=6 , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : str=8 , lowerCAmelCase_ : Optional[int]=32 , lowerCAmelCase_ : Dict=128 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : str=1e-6 , lowerCAmelCase_ : Dict=1.0 , lowerCAmelCase_ : str="relu" , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=1 , **lowerCAmelCase_ : Optional[int] , ) -> int: UpperCAmelCase_ : int = vocab_size UpperCAmelCase_ : Optional[Any] = d_model UpperCAmelCase_ : str = d_kv UpperCAmelCase_ : Any = d_ff UpperCAmelCase_ : int = num_layers UpperCAmelCase_ : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry UpperCAmelCase_ : Optional[Any] = num_heads UpperCAmelCase_ : Any = relative_attention_num_buckets UpperCAmelCase_ : Optional[Any] = relative_attention_max_distance UpperCAmelCase_ : Optional[Any] = dropout_rate UpperCAmelCase_ : Tuple = layer_norm_epsilon UpperCAmelCase_ : int = initializer_factor UpperCAmelCase_ : int = feed_forward_proj UpperCAmelCase_ : str = use_cache UpperCAmelCase_ : Tuple = self.feed_forward_proj.split("-" ) UpperCAmelCase_ : List[Any] = act_info[-1] UpperCAmelCase_ : Optional[int] = act_info[0] == "gated" if len(lowerCAmelCase_ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase_ ) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
" "'gated-gelu' or 'relu'" ) # for backwards compatibility if feed_forward_proj == "gated-gelu": UpperCAmelCase_ : int = "gelu_new" super().__init__( pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , **lowerCAmelCase_ , ) class UpperCamelCase_ (__A ): @property def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Mapping[str, Mapping[int, str]]: UpperCAmelCase_ : Any = { "input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {0: "batch", 1: "encoder_sequence"}, } if self.use_past: UpperCAmelCase_ : List[Any] = "past_encoder_sequence + sequence" UpperCAmelCase_ : Union[str, Any] = {0: "batch"} UpperCAmelCase_ : Optional[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCAmelCase_ : List[Any] = {0: "batch", 1: "decoder_sequence"} UpperCAmelCase_ : Tuple = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase_ , direction="inputs" ) return common_inputs @property def _SCREAMING_SNAKE_CASE ( self : Any ) -> int: return 13
268
1
"""simple docstring""" import argparse import json from tqdm import tqdm def snake_case ( ): UpperCAmelCase_ : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--src_path" ,type=A__ ,default="biencoder-nq-dev.json" ,help="Path to raw DPR training data" ,) parser.add_argument( "--evaluation_set" ,type=A__ ,help="where to store parsed evaluation_set file" ,) parser.add_argument( "--gold_data_path" ,type=A__ ,help="where to store parsed gold_data_path file" ,) UpperCAmelCase_ : Optional[int] = parser.parse_args() with open(args.src_path ,"r" ) as src_file, open(args.evaluation_set ,"w" ) as eval_file, open( args.gold_data_path ,"w" ) as gold_file: UpperCAmelCase_ : Union[str, Any] = json.load(A__ ) for dpr_record in tqdm(A__ ): UpperCAmelCase_ : int = dpr_record["question"] UpperCAmelCase_ : Optional[int] = [context["title"] for context in dpr_record["positive_ctxs"]] eval_file.write(question + "\n" ) gold_file.write("\t".join(A__ ) + "\n" ) if __name__ == "__main__": main()
268
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class UpperCamelCase_ : # setable values __magic_name__ = None __magic_name__ = None __magic_name__ = None # sigma(t_i) @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> Optional[Any]: return cls() @dataclass class UpperCamelCase_ (__A ): __magic_name__ = 42 __magic_name__ = 42 __magic_name__ = 42 class UpperCamelCase_ (__A , __A ): @property def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str: return True @register_to_config def __init__( self : List[str] , lowerCAmelCase_ : float = 0.0_2 , lowerCAmelCase_ : float = 100 , lowerCAmelCase_ : float = 1.0_0_7 , lowerCAmelCase_ : float = 80 , lowerCAmelCase_ : float = 0.0_5 , lowerCAmelCase_ : float = 50 , ) -> Union[str, Any]: pass def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: return KarrasVeSchedulerState.create() def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = () ) -> KarrasVeSchedulerState: UpperCAmelCase_ : Dict = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy() UpperCAmelCase_ : Dict = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : random.KeyArray , ) -> Tuple[jnp.ndarray, float]: if self.config.s_min <= sigma <= self.config.s_max: UpperCAmelCase_ : Any = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: UpperCAmelCase_ : Optional[int] = 0 # sample eps ~ N(0, S_noise^2 * I) UpperCAmelCase_ : List[Any] = random.split(lowerCAmelCase_ , num=1 ) UpperCAmelCase_ : List[str] = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape ) UpperCAmelCase_ : Optional[Any] = sigma + gamma * sigma UpperCAmelCase_ : Any = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : Union[str, Any] = sample_hat + sigma_hat * model_output UpperCAmelCase_ : List[Any] = (sample_hat - pred_original_sample) / sigma_hat UpperCAmelCase_ : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : jnp.ndarray , lowerCAmelCase_ : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: UpperCAmelCase_ : str = sample_prev + sigma_prev * 
model_output UpperCAmelCase_ : Any = (sample_prev - pred_original_sample) / sigma_prev UpperCAmelCase_ : int = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : KarrasVeSchedulerState , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] ) -> Dict: raise NotImplementedError()
268
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional import numpy as np import torch import torch.nn as nn from ..utils import BaseOutput, is_torch_version, randn_tensor from .attention_processor import SpatialNorm from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block @dataclass class UpperCamelCase_ (__A ): __magic_name__ = 42 class UpperCamelCase_ (nn.Module ): def __init__( self : Optional[int] , lowerCAmelCase_ : List[str]=3 , lowerCAmelCase_ : int=3 , lowerCAmelCase_ : List[str]=("DownEncoderBlock2D",) , lowerCAmelCase_ : str=(64,) , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Any=32 , lowerCAmelCase_ : str="silu" , lowerCAmelCase_ : Optional[int]=True , ) -> List[Any]: super().__init__() UpperCAmelCase_ : Optional[int] = layers_per_block UpperCAmelCase_ : Tuple = torch.nn.Convad( lowerCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , ) UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : Optional[Any] = nn.ModuleList([] ) # down UpperCAmelCase_ : List[str] = block_out_channels[0] for i, down_block_type in enumerate(lowerCAmelCase_ ): UpperCAmelCase_ : Union[str, Any] = output_channel UpperCAmelCase_ : Dict = block_out_channels[i] UpperCAmelCase_ : Optional[int] = i == len(lowerCAmelCase_ ) - 1 UpperCAmelCase_ : Optional[int] = get_down_block( lowerCAmelCase_ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , ) self.down_blocks.append(lowerCAmelCase_ ) # mid UpperCAmelCase_ : Any = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , ) # out UpperCAmelCase_ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase_ , eps=1e-6 ) UpperCAmelCase_ : List[str] = nn.SiLU() UpperCAmelCase_ : Any = 2 * out_channels if double_z else out_channels UpperCAmelCase_ : List[Any] = nn.Convad(block_out_channels[-1] , lowerCAmelCase_ , 3 , padding=1 ) UpperCAmelCase_ : Optional[int] = False def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : int ) -> str: UpperCAmelCase_ : Dict = x UpperCAmelCase_ : int = self.conv_in(lowerCAmelCase_ ) if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCAmelCase_ : Optional[Any] ): def custom_forward(*lowerCAmelCase_ : Optional[Any] ): return module(*lowerCAmelCase_ ) return custom_forward # down if is_torch_version(">=" , "1.11.0" ): for down_block in self.down_blocks: UpperCAmelCase_ : Dict = torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ ) # middle UpperCAmelCase_ : Union[str, Any] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ ) else: for down_block in self.down_blocks: UpperCAmelCase_ : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ ) # middle UpperCAmelCase_ : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase_ ) else: # down for down_block in self.down_blocks: UpperCAmelCase_ : 
Optional[Any] = down_block(lowerCAmelCase_ ) # middle UpperCAmelCase_ : Tuple = self.mid_block(lowerCAmelCase_ ) # post-process UpperCAmelCase_ : List[str] = self.conv_norm_out(lowerCAmelCase_ ) UpperCAmelCase_ : str = self.conv_act(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.conv_out(lowerCAmelCase_ ) return sample class UpperCamelCase_ (nn.Module ): def __init__( self : str , lowerCAmelCase_ : Union[str, Any]=3 , lowerCAmelCase_ : Any=3 , lowerCAmelCase_ : Optional[int]=("UpDecoderBlock2D",) , lowerCAmelCase_ : Dict=(64,) , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Tuple=32 , lowerCAmelCase_ : Dict="silu" , lowerCAmelCase_ : Union[str, Any]="group" , ) -> int: super().__init__() UpperCAmelCase_ : Optional[int] = layers_per_block UpperCAmelCase_ : List[Any] = nn.Convad( lowerCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , ) UpperCAmelCase_ : str = None UpperCAmelCase_ : Optional[Any] = nn.ModuleList([] ) UpperCAmelCase_ : List[str] = in_channels if norm_type == "spatial" else None # mid UpperCAmelCase_ : List[str] = UNetMidBlockaD( in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=lowerCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , ) # up UpperCAmelCase_ : Optional[int] = list(reversed(lowerCAmelCase_ ) ) UpperCAmelCase_ : Tuple = reversed_block_out_channels[0] for i, up_block_type in enumerate(lowerCAmelCase_ ): UpperCAmelCase_ : Optional[int] = output_channel UpperCAmelCase_ : int = reversed_block_out_channels[i] UpperCAmelCase_ : str = i == len(lowerCAmelCase_ ) - 1 UpperCAmelCase_ : int = get_up_block( lowerCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , prev_output_channel=lowerCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=lowerCAmelCase_ , resnet_groups=lowerCAmelCase_ , attention_head_dim=lowerCAmelCase_ , temb_channels=lowerCAmelCase_ , resnet_time_scale_shift=lowerCAmelCase_ , ) self.up_blocks.append(lowerCAmelCase_ ) UpperCAmelCase_ : str = output_channel # out if norm_type == "spatial": UpperCAmelCase_ : List[str] = SpatialNorm(block_out_channels[0] , lowerCAmelCase_ ) else: UpperCAmelCase_ : List[str] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase_ , eps=1e-6 ) UpperCAmelCase_ : List[Any] = nn.SiLU() UpperCAmelCase_ : Optional[Any] = nn.Convad(block_out_channels[0] , lowerCAmelCase_ , 3 , padding=1 ) UpperCAmelCase_ : List[str] = False def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict=None ) -> List[Any]: UpperCAmelCase_ : Optional[Any] = z UpperCAmelCase_ : Any = self.conv_in(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype if self.training and self.gradient_checkpointing: def create_custom_forward(lowerCAmelCase_ : Optional[int] ): def custom_forward(*lowerCAmelCase_ : Optional[int] ): return module(*lowerCAmelCase_ ) return custom_forward if is_torch_version(">=" , "1.11.0" ): # middle UpperCAmelCase_ : List[str] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = sample.to(lowerCAmelCase_ ) # up for up_block in self.up_blocks: UpperCAmelCase_ : Optional[int] = 
torch.utils.checkpoint.checkpoint( create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , use_reentrant=lowerCAmelCase_ ) else: # middle UpperCAmelCase_ : List[str] = torch.utils.checkpoint.checkpoint( create_custom_forward(self.mid_block ) , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Any = sample.to(lowerCAmelCase_ ) # up for up_block in self.up_blocks: UpperCAmelCase_ : int = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ ) else: # middle UpperCAmelCase_ : Optional[Any] = self.mid_block(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = sample.to(lowerCAmelCase_ ) # up for up_block in self.up_blocks: UpperCAmelCase_ : Tuple = up_block(lowerCAmelCase_ , lowerCAmelCase_ ) # post-process if latent_embeds is None: UpperCAmelCase_ : Optional[int] = self.conv_norm_out(lowerCAmelCase_ ) else: UpperCAmelCase_ : Tuple = self.conv_norm_out(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.conv_act(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = self.conv_out(lowerCAmelCase_ ) return sample class UpperCamelCase_ (nn.Module ): def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : List[Any]="random" , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : str=True ) -> Any: super().__init__() UpperCAmelCase_ : List[str] = n_e UpperCAmelCase_ : List[Any] = vq_embed_dim UpperCAmelCase_ : List[str] = beta UpperCAmelCase_ : Union[str, Any] = legacy UpperCAmelCase_ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim ) self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e ) UpperCAmelCase_ : List[str] = remap if self.remap is not None: self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) ) UpperCAmelCase_ : List[Any] = self.used.shape[0] UpperCAmelCase_ : Optional[Any] = unknown_index # "random" or "extra" or integer if self.unknown_index == "extra": UpperCAmelCase_ : Union[str, Any] = self.re_embed UpperCAmelCase_ : Optional[Any] = self.re_embed + 1 print( f"""Remapping {self.n_e} indices to {self.re_embed} indices. 
""" f"""Using {self.unknown_index} for unknown indices.""" ) else: UpperCAmelCase_ : List[str] = n_e UpperCAmelCase_ : Optional[Any] = sane_index_shape def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : List[Any] ) -> Any: UpperCAmelCase_ : str = inds.shape assert len(lowerCAmelCase_ ) > 1 UpperCAmelCase_ : int = inds.reshape(ishape[0] , -1 ) UpperCAmelCase_ : Tuple = self.used.to(lowerCAmelCase_ ) UpperCAmelCase_ : Any = (inds[:, :, None] == used[None, None, ...]).long() UpperCAmelCase_ : Optional[Any] = match.argmax(-1 ) UpperCAmelCase_ : Optional[Any] = match.sum(2 ) < 1 if self.unknown_index == "random": UpperCAmelCase_ : List[str] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device ) else: UpperCAmelCase_ : List[str] = self.unknown_index return new.reshape(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Tuple ) -> Any: UpperCAmelCase_ : str = inds.shape assert len(lowerCAmelCase_ ) > 1 UpperCAmelCase_ : Dict = inds.reshape(ishape[0] , -1 ) UpperCAmelCase_ : str = self.used.to(lowerCAmelCase_ ) if self.re_embed > self.used.shape[0]: # extra token UpperCAmelCase_ : Union[str, Any] = 0 # simply set to zero UpperCAmelCase_ : Tuple = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase_ ) return back.reshape(lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Optional[Any]: # reshape z -> (batch, height, width, channel) and flatten UpperCAmelCase_ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous() UpperCAmelCase_ : Optional[Any] = z.view(-1 , self.vq_embed_dim ) # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z UpperCAmelCase_ : Optional[Any] = torch.argmin(torch.cdist(lowerCAmelCase_ , self.embedding.weight ) , dim=1 ) UpperCAmelCase_ : List[Any] = self.embedding(lowerCAmelCase_ ).view(z.shape ) UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : List[Any] = None # compute loss for embedding if not self.legacy: UpperCAmelCase_ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 ) else: UpperCAmelCase_ : Optional[int] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 ) # preserve gradients UpperCAmelCase_ : str = z + (z_q - z).detach() # reshape back to match original input shape UpperCAmelCase_ : Union[str, Any] = z_q.permute(0 , 3 , 1 , 2 ).contiguous() if self.remap is not None: UpperCAmelCase_ : int = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis UpperCAmelCase_ : str = self.remap_to_used(lowerCAmelCase_ ) UpperCAmelCase_ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten if self.sane_index_shape: UpperCAmelCase_ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] ) return z_q, loss, (perplexity, min_encodings, min_encoding_indices) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: # shape specifying (batch, height, width, channel) if self.remap is not None: UpperCAmelCase_ : Optional[Any] = indices.reshape(shape[0] , -1 ) # add batch axis UpperCAmelCase_ : str = self.unmap_to_all(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = indices.reshape(-1 ) # flatten again # get quantized latent vectors UpperCAmelCase_ : Any = self.embedding(lowerCAmelCase_ ) if shape is not None: UpperCAmelCase_ : List[Any] = z_q.view(lowerCAmelCase_ ) # reshape back to match original input shape UpperCAmelCase_ : int = 
z_q.permute(0 , 3 , 1 , 2 ).contiguous() return z_q class UpperCamelCase_ (__A ): def __init__( self : Optional[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any]=False ) -> List[Any]: UpperCAmelCase_ : int = parameters UpperCAmelCase_ , UpperCAmelCase_ : Any = torch.chunk(lowerCAmelCase_ , 2 , dim=1 ) UpperCAmelCase_ : Dict = torch.clamp(self.logvar , -3_0.0 , 2_0.0 ) UpperCAmelCase_ : List[Any] = deterministic UpperCAmelCase_ : Tuple = torch.exp(0.5 * self.logvar ) UpperCAmelCase_ : List[str] = torch.exp(self.logvar ) if self.deterministic: UpperCAmelCase_ : List[str] = torch.zeros_like( self.mean , device=self.parameters.device , dtype=self.parameters.dtype ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : Optional[torch.Generator] = None ) -> torch.FloatTensor: # make sure sample is on the same device as the parameters and has same dtype UpperCAmelCase_ : int = randn_tensor( self.mean.shape , generator=lowerCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype ) UpperCAmelCase_ : Optional[int] = self.mean + self.std * sample return x def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : Optional[Any]=None ) -> List[Any]: if self.deterministic: return torch.Tensor([0.0] ) else: if other is None: return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] ) else: return 0.5 * torch.sum( torch.pow(self.mean - other.mean , 2 ) / other.var + self.var / other.var - 1.0 - self.logvar + other.logvar , dim=[1, 2, 3] , ) def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str=[1, 2, 3] ) -> Optional[Any]: if self.deterministic: return torch.Tensor([0.0] ) UpperCAmelCase_ : str = np.log(2.0 * np.pi ) return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int: return self.mean
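The heart of the vector-quantizer forward pass above is a nearest-codebook lookup via `torch.cdist` and `argmin`. A minimal self-contained sketch of just that step follows; the sizes and names are assumptions for illustration, not part of the stored row.

import torch
import torch.nn as nn

# Minimal sketch of the nearest-codebook lookup used in the forward pass above.
n_e, vq_embed_dim = 16, 4  # assumed codebook size and embedding width
embedding = nn.Embedding(n_e, vq_embed_dim)

z = torch.randn(2, vq_embed_dim, 8, 8)  # (batch, channel, height, width)
z_flat = z.permute(0, 2, 3, 1).reshape(-1, vq_embed_dim)

# index of the closest codebook entry for every spatial position
indices = torch.argmin(torch.cdist(z_flat, embedding.weight), dim=1)
z_q = embedding(indices).view(2, 8, 8, vq_embed_dim).permute(0, 3, 1, 2)
assert z_q.shape == z.shape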
268
"""simple docstring""" def snake_case ( A__ ,A__ ): # "extended trapezoidal rule" # int(f) = dx/2 * (f1 + 2f2 + ... + fn) UpperCAmelCase_ : Dict = (boundary[1] - boundary[0]) / steps UpperCAmelCase_ : Optional[int] = boundary[0] UpperCAmelCase_ : str = boundary[1] UpperCAmelCase_ : Tuple = make_points(A__ ,A__ ,A__ ) UpperCAmelCase_ : List[str] = 0.0 y += (h / 2.0) * f(A__ ) for i in x_i: # print(i) y += h * f(A__ ) y += (h / 2.0) * f(A__ ) return y def snake_case ( A__ ,A__ ,A__ ): UpperCAmelCase_ : Union[str, Any] = a + h while x < (b - h): yield x UpperCAmelCase_ : Optional[Any] = x + h def snake_case ( A__ ): # enter your function here UpperCAmelCase_ : Dict = (x - 0) * (x - 0) return y def snake_case ( ): UpperCAmelCase_ : Dict = 0.0 # Lower bound of integration UpperCAmelCase_ : Optional[int] = 1.0 # Upper bound of integration UpperCAmelCase_ : Dict = 10.0 # define number of steps or resolution UpperCAmelCase_ : List[Any] = [a, b] # define boundary of integration UpperCAmelCase_ : Union[str, Any] = method_a(A__ ,A__ ) print(F"""y = {y}""" ) if __name__ == "__main__": main()
268
1
"""simple docstring""" from ...processing_utils import ProcessorMixin class UpperCamelCase_ (__A ): __magic_name__ = '''WhisperFeatureExtractor''' __magic_name__ = '''WhisperTokenizer''' def __init__( self : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ) -> str: super().__init__(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : List[Any] = self.feature_extractor UpperCAmelCase_ : List[str] = False def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[str]=True ) -> Dict: return self.tokenizer.get_decoder_prompt_ids(task=lowerCAmelCase_ , language=lowerCAmelCase_ , no_timestamps=lowerCAmelCase_ ) def __call__( self : List[Any] , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ) -> List[Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : str = kwargs.pop("audio" , lowerCAmelCase_ ) UpperCAmelCase_ : int = kwargs.pop("sampling_rate" , lowerCAmelCase_ ) UpperCAmelCase_ : Any = kwargs.pop("text" , lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: UpperCAmelCase_ : int = args[0] UpperCAmelCase_ : Optional[int] = args[1:] if audio is None and text is None: raise ValueError("You need to specify either an `audio` or `text` input to process." ) if audio is not None: UpperCAmelCase_ : Any = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_ ) if text is not None: UpperCAmelCase_ : Tuple = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_ ) if text is None: return inputs elif audio is None: return encodings else: UpperCAmelCase_ : int = encodings["input_ids"] return inputs def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , *lowerCAmelCase_ : List[Any] , **lowerCAmelCase_ : int ) -> List[Any]: return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Tuple , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : str ) -> Optional[Any]: return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : str="np" ) -> str: return self.tokenizer.get_prompt_ids(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
268
"""simple docstring""" from __future__ import annotations import time from collections.abc import Sequence from random import randint from matplotlib import pyplot as plt def snake_case ( A__ ,A__ ,A__ ): if not arr: return None, None, 0 if low == high: return low, high, arr[low] UpperCAmelCase_ : Dict = (low + high) // 2 UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = max_subarray(A__ ,A__ ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = max_subarray(A__ ,mid + 1 ,A__ ) UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = max_cross_sum(A__ ,A__ ,A__ ,A__ ) if left_sum >= right_sum and left_sum >= cross_sum: return left_low, left_high, left_sum elif right_sum >= left_sum and right_sum >= cross_sum: return right_low, right_high, right_sum return cross_left, cross_right, cross_sum def snake_case ( A__ ,A__ ,A__ ,A__ ): UpperCAmelCase_ , UpperCAmelCase_ : str = float("-inf" ), -1 UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = float("-inf" ), -1 UpperCAmelCase_ : int | float = 0 for i in range(A__ ,low - 1 ,-1 ): summ += arr[i] if summ > left_sum: UpperCAmelCase_ : str = summ UpperCAmelCase_ : Any = i UpperCAmelCase_ : Dict = 0 for i in range(mid + 1 ,high + 1 ): summ += arr[i] if summ > right_sum: UpperCAmelCase_ : List[Any] = summ UpperCAmelCase_ : Optional[Any] = i return max_left, max_right, (left_sum + right_sum) def snake_case ( A__ ): UpperCAmelCase_ : str = [randint(1 ,A__ ) for _ in range(A__ )] UpperCAmelCase_ : str = time.time() max_subarray(A__ ,0 ,input_size - 1 ) UpperCAmelCase_ : int = time.time() return end - start def snake_case ( ): UpperCAmelCase_ : int = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00] UpperCAmelCase_ : List[str] = [time_max_subarray(A__ ) for input_size in input_sizes] print("No of Inputs\t\tTime Taken" ) for input_size, runtime in zip(A__ ,A__ ): print(A__ ,"\t\t" ,A__ ) plt.plot(A__ ,A__ ) plt.xlabel("Number of Inputs" ) plt.ylabel("Time taken in seconds" ) plt.show() if __name__ == "__main__": from doctest import testmod testmod()
268
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: lowerCamelCase_ = None lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} lowerCamelCase_ = { '''vocab_file''': { '''facebook/mbart-large-en-ro''': ( '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model''' ), '''facebook/mbart-large-cc25''': ( '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''', '''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''', }, } lowerCamelCase_ = { '''facebook/mbart-large-en-ro''': 1024, '''facebook/mbart-large-cc25''': 1024, } # fmt: off lowerCamelCase_ = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN'''] class UpperCamelCase_ (__A ): __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = ['''input_ids''', '''attention_mask'''] __magic_name__ = MBartTokenizer __magic_name__ = [] __magic_name__ = [] def __init__( self : List[str] , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : List[Any]="<s>" , lowerCAmelCase_ : Optional[Any]="</s>" , lowerCAmelCase_ : str="</s>" , lowerCAmelCase_ : str="<s>" , lowerCAmelCase_ : List[Any]="<unk>" , lowerCAmelCase_ : Tuple="<pad>" , lowerCAmelCase_ : Union[str, Any]="<mask>" , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Optional[Any]=None , **lowerCAmelCase_ : Any , ) -> Tuple: # Mask token behave like a normal word, i.e. include the space before it UpperCAmelCase_ : Union[str, Any] = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token super().__init__( vocab_file=lowerCAmelCase_ , tokenizer_file=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , src_lang=lowerCAmelCase_ , tgt_lang=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , ) UpperCAmelCase_ : Tuple = vocab_file UpperCAmelCase_ : str = False if not self.vocab_file else True UpperCAmelCase_ : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} ) UpperCAmelCase_ : Tuple = { lang_code: self.convert_tokens_to_ids(lowerCAmelCase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES } UpperCAmelCase_ : int = src_lang if src_lang is not None else "en_XX" UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang ) UpperCAmelCase_ : Any = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str: return self._src_lang @src_lang.setter def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : List[Any] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[int] , lowerCAmelCase_ : Optional[List[int]] = None ) -> List[int]: UpperCAmelCase_ : List[str] = [self.sep_token_id] UpperCAmelCase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] , lowerCAmelCase_ : Optional[str] , **lowerCAmelCase_ : Union[str, Any] ) -> Dict: if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCAmelCase_ : str = src_lang UpperCAmelCase_ : str = self(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ) UpperCAmelCase_ : Optional[int] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = tgt_lang_id return inputs def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str = "en_XX" , lowerCAmelCase_ : Optional[List[str]] = None , lowerCAmelCase_ : str = "ro_RO" , **lowerCAmelCase_ : Dict , ) -> BatchEncoding: UpperCAmelCase_ : List[Any] = src_lang UpperCAmelCase_ : Tuple = tgt_lang return super().prepare_seqaseq_batch(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: return self.set_src_lang_special_tokens(self.src_lang ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase_ : List[Any] ) -> None: UpperCAmelCase_ : int = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : str = [] UpperCAmelCase_ : str = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , 
) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase_ : str ) -> None: UpperCAmelCase_ : Union[str, Any] = self.convert_tokens_to_ids(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = [] UpperCAmelCase_ : Optional[int] = [self.eos_token_id, self.cur_lang_code] UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.prefix_tokens ) UpperCAmelCase_ : Tuple = self.convert_ids_to_tokens(self.suffix_tokens ) UpperCAmelCase_ : Optional[Any] = processors.TemplateProcessing( single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCAmelCase_ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" ) return UpperCAmelCase_ : List[str] = os.path.join( lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ): copyfile(self.vocab_file , lowerCAmelCase_ ) return (out_vocab_file,)
268
"""simple docstring""" from __future__ import annotations import time lowerCamelCase_ = list[tuple[int, int]] lowerCamelCase_ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class UpperCamelCase_ : def __init__( self : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Node | None ) -> Dict: UpperCAmelCase_ : Any = pos_x UpperCAmelCase_ : str = pos_y UpperCAmelCase_ : int = (pos_y, pos_x) UpperCAmelCase_ : int = goal_x UpperCAmelCase_ : Tuple = goal_y UpperCAmelCase_ : Union[str, Any] = parent class UpperCamelCase_ : def __init__( self : List[Any] , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Tuple: UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase_ ) UpperCAmelCase_ : Union[str, Any] = [self.start] UpperCAmelCase_ : int = False def _SCREAMING_SNAKE_CASE ( self : Any ) -> Path | None: while self.node_queue: UpperCAmelCase_ : str = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: UpperCAmelCase_ : Optional[Any] = True return self.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = self.get_successors(lowerCAmelCase_ ) for node in successors: self.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> list[Node]: UpperCAmelCase_ : List[str] = [] for action in delta: UpperCAmelCase_ : List[Any] = parent.pos_x + action[1] UpperCAmelCase_ : List[str] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , lowerCAmelCase_ ) ) return successors def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node | None ) -> Path: UpperCAmelCase_ : Union[str, Any] = node UpperCAmelCase_ : Union[str, Any] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) UpperCAmelCase_ : Tuple = current_node.parent path.reverse() return path class UpperCamelCase_ : def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Union[str, Any]: UpperCAmelCase_ : Optional[int] = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : Optional[Any] = False def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Path | None: while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: UpperCAmelCase_ : int = self.fwd_bfs.node_queue.pop(0 ) UpperCAmelCase_ : Dict = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: UpperCAmelCase_ : str = True return self.retrace_bidirectional_path( lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : str = current_bwd_node UpperCAmelCase_ : List[str] = current_fwd_node UpperCAmelCase_ : Tuple = { self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase_ ), self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase_ ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: 
bfs.node_queue.append(lowerCAmelCase_ ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Path: UpperCAmelCase_ : Optional[Any] = self.fwd_bfs.retrace_path(lowerCAmelCase_ ) UpperCAmelCase_ : Dict = self.bwd_bfs.retrace_path(lowerCAmelCase_ ) bwd_path.pop() bwd_path.reverse() UpperCAmelCase_ : str = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() lowerCamelCase_ = (0, 0) lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) lowerCamelCase_ = time.time() lowerCamelCase_ = BreadthFirstSearch(init, goal) lowerCamelCase_ = bfs.search() lowerCamelCase_ = time.time() - start_bfs_time print('''Unidirectional BFS computation time : ''', bfs_time) lowerCamelCase_ = time.time() lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal) lowerCamelCase_ = bd_bfs.search() lowerCamelCase_ = time.time() - start_bd_bfs_time print('''Bidirectional BFS computation time : ''', bd_bfs_time)
268
1