Dataset schema (five fields per row; string columns report their min/max value lengths):

column                     type      range
code                       string    lengths 81 – 54k characters
code_codestyle             int64     0 – 721
style_context              string    lengths 91 – 41.9k characters
style_context_codestyle    int64     0 – 699
label                      int64     0 – 1
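Each row below pairs an obfuscated `code` snippet with a `style_context` snippet plus two style ids and a label. A minimal sketch of loading and inspecting such a dataset with the `datasets` library; the dataset id is a placeholder, and the label semantics (1 = both snippets share a style) are an assumption, not documented in the dump:

```python
from datasets import load_dataset

# "org/code-style-pairs" is a placeholder id, not a real dataset path.
ds = load_dataset("org/code-style-pairs", split="train")
row = ds[0]
print(row["code"][:120])               # obfuscated source snippet
print(row["code_codestyle"])           # integer style id in [0, 721]
print(row["style_context"][:120])      # companion snippet used as style context
print(row["style_context_codestyle"])  # integer style id in [0, 699]
print(row["label"])                    # assumed: 1 if the two snippets share a style
```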
"""simple docstring""" from __future__ import annotations from math import gcd def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int = 2 , _lowerCAmelCase : int = 1 , _lowerCAmelCase : int = 3 , ): '''simple docstring''' if num < 2: raise ValueError('The input value cannot be less than 2' ) # Because of the relationship between ``f(f(x))`` and ``f(x)``, this # algorithm struggles to find factors that are divisible by two. # As a workaround, we specifically check for two and even inputs. # See: https://math.stackexchange.com/a/2856214/165820 if num > 2 and num % 2 == 0: return 2 # Pollard's Rho algorithm requires a function that returns pseudorandom # values between 0 <= X < ``num``. It doesn't need to be random in the # sense that the output value is cryptographically secure or difficult # to calculate, it only needs to be random in the sense that all output # values should be equally likely to appear. # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num`` # However, the success of Pollard's algorithm isn't guaranteed and is # determined in part by the initial seed and the chosen random function. # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num`` # where ``C`` is a value that we can modify between each attempt. def rand_fn(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> int: return (pow(_lowerCAmelCase , 2 ) + step) % modulus for _ in range(_lowerCAmelCase ): # These track the position within the cycle detection logic. lowercase__ : Dict = seed lowercase__ : Tuple = seed while True: # At each iteration, the tortoise moves one step and the hare moves two. lowercase__ : Union[str, Any] = rand_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Optional[Any] = rand_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Union[str, Any] = rand_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # At some point both the tortoise and the hare will enter a cycle whose # length ``p`` is a divisor of ``num``. Once in that cycle, at some point # the tortoise and hare will end up on the same value modulo ``p``. # We can detect when this happens because the position difference between # the tortoise and the hare will share a common divisor with ``num``. lowercase__ : Optional[int] = gcd(hare - tortoise , _lowerCAmelCase ) if divisor == 1: # No common divisor yet, just keep searching. continue else: # We found a common divisor! if divisor == num: # Unfortunately, the divisor is ``num`` itself and is useless. break else: # The divisor is a nontrivial factor of ``num``! return divisor # If we made it here, then this attempt failed. # We need to pick a new starting seed for the tortoise and hare # in addition to a new step value for the random function. # To keep this example implementation deterministic, the # new values will be generated based on currently available # values instead of using something like ``random.randint``. # We can use the hare's position as the new seed. # This is actually what Richard Brent's the "optimized" variant does. lowercase__ : Optional[Any] = hare # The new step value for the random function can just be incremented. # At first the results will be similar to what the old function would # have produced, but the value will quickly diverge after a bit. step += 1 # We haven't found a divisor within the requested number of attempts. # We were unlucky or ``num`` itself is actually prime. 
return None if __name__ == "__main__": import argparse _UpperCamelCase : str = argparse.ArgumentParser() parser.add_argument( "num", type=int, help="The value to find a divisor of", ) parser.add_argument( "--attempts", type=int, default=3, help="The number of attempts before giving up", ) _UpperCamelCase : int = parser.parse_args() _UpperCamelCase : List[Any] = pollard_rho(args.num, attempts=args.attempts) if divisor is None: print(f'''{args.num} is probably prime''') else: _UpperCamelCase : int = args.num // divisor print(f'''{args.num} = {divisor} * {quotient}''')
code_codestyle: 645
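The `code` sample above is an obfuscated Pollard's rho factorizer. A de-obfuscated sketch with readable names and the same logic: Floyd tortoise-and-hare cycle detection over f(x) = (x² + c) mod n, with Brent-style reseeding between attempts:

```python
from __future__ import annotations

from math import gcd


def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial factor of ``num``, or None if every attempt fails."""
    if num < 2:
        raise ValueError("The input value cannot be less than 2")
    if num > 2 and num % 2 == 0:
        return 2  # the quadratic map struggles with even numbers

    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        tortoise = hare = seed
        while True:
            tortoise = rand_fn(tortoise, step, num)  # tortoise moves one step
            hare = rand_fn(hare, step, num)          # hare moves two steps
            hare = rand_fn(hare, step, num)
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                continue      # no common divisor yet, keep searching
            if divisor == num:
                break         # trivial divisor, retry with new parameters
            return divisor    # nontrivial factor found
        seed = hare           # reseed from the hare's position (Brent's idea)
        step += 1             # and perturb the random function
    return None


print(pollard_rho(10))   # 2, via the even-number shortcut
print(pollard_rho(187))  # 11, since 187 = 11 * 17
```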
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any: lowercase__ : List[str] = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[int] = image_size lowercase__ : List[Any] = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : str = is_training lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[Any] = hidden_size lowercase__ : Dict = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[Any] = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Any = type_sequence_label_size lowercase__ : Dict = initializer_range lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Tuple = n_targets lowercase__ : Optional[int] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens def _UpperCAmelCase ( self ) -> Any: lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowercase__ : Tuple = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowercase__ : int = [] for i in range(self.batch_size ): lowercase__ : Optional[Any] = {} lowercase__ : Any = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=a ) lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a ) labels.append(a ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> List[Any]: return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def _UpperCAmelCase ( self , a , a , a ) -> int: lowercase__ : List[str] = YolosModel(config=a ) model.to(a ) model.eval() lowercase__ : List[Any] = model(a ) self.parent.assertEqual( result.last_hidden_state.shape , 
(self.batch_size, self.expected_seq_len, self.hidden_size) ) def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]: lowercase__ : str = YolosForObjectDetection(a ) model.to(a ) model.eval() lowercase__ : Dict = model(pixel_values=a ) lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowercase__ : str = model(pixel_values=a , labels=a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs lowercase__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCamelCase__ : List[str] = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) lowerCamelCase__ : List[Any] = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Tuple = False lowerCamelCase__ : Union[str, Any] = False def _UpperCAmelCase ( self , a , a , a=False ) -> Dict: lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowercase__ : Optional[Any] = [] for i in range(self.model_tester.batch_size ): lowercase__ : Dict = {} lowercase__ : Dict = torch.ones( size=(self.model_tester.n_targets,) , device=a , dtype=torch.long ) lowercase__ : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=a , dtype=torch.float ) labels.append(a ) lowercase__ : Union[str, Any] = labels return inputs_dict def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Dict = YolosModelTester(self ) lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: # YOLOS does not use inputs_embeds pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) lowercase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Tuple = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( 
self ) -> Dict: lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Dict = True # in YOLOS, the seq_len is different lowercase__ : Tuple = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : str = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : Optional[int] = True lowercase__ : List[Any] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[str] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : Dict = len(a ) # Check attention is always last and order is fine lowercase__ : Any = True lowercase__ : int = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = 1 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowercase__ : Tuple = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> List[str]: def check_hidden_states_output(a , a , a ): lowercase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(a , a ) ) lowercase__ : int = outputs.hidden_states lowercase__ : Any = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(a ) , a ) # YOLOS has a different seq_length lowercase__ : Optional[int] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[Any] = True check_hidden_states_output(a , a , a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*a ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : int = YolosModel.from_pretrained(a ) self.assertIsNotNone(a ) def a_ ( ): '''simple docstring''' lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Union[str, Any]: return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = 
YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a ) lowercase__ : Tuple = self.default_image_processor lowercase__ : Optional[int] = prepare_img() lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : int = model(inputs.pixel_values ) # verify outputs lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : Any = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , ) lowercase__ : List[str] = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) ) # verify postprocessing lowercase__ : Optional[Any] = image_processor.post_process_object_detection( a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a ) lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7] lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a ) self.assertEqual(len(results['scores'] ) , 5 ) self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) ) self.assertSequenceEqual(results['labels'].tolist() , a ) self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
style_context_codestyle: 645
label: 1
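The `style_context` sample above tests YOLOS object detection. A minimal inference sketch with the same checkpoint and post-processing call as the integration test; the image path is a placeholder:

```python
import torch
from PIL import Image
from transformers import AutoImageProcessor, YolosForObjectDetection

processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")

image = Image.open("cats.png")  # placeholder input image
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)  # logits: (1, num_detection_tokens, num_labels + 1)

# Turn raw logits/boxes into thresholded detections in image coordinates.
results = processor.post_process_object_detection(
    outputs, threshold=0.3, target_sizes=[image.size[::-1]]
)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), box.tolist())
```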
"""simple docstring""" from functools import reduce _UpperCamelCase : Tuple = ( "73167176531330624919225119674426574742355349194934" "96983520312774506326239578318016984801869478851843" "85861560789112949495459501737958331952853208805511" "12540698747158523863050715693290963295227443043557" "66896648950445244523161731856403098711121722383113" "62229893423380308135336276614282806444486645238749" "30358907296290491560440772390713810515859307960866" "70172427121883998797908792274921901699720888093776" "65727333001053367881220235421809751254540594752243" "52584907711670556013604839586446706324415722155397" "53697817977846174064955149290862569321978468622482" "83972241375657056057490261407972968652414535100474" "82166370484403199890008895243450658541227588666881" "16427171479924442928230863465674813919123162824586" "17866458359124566529476545682848912883142607690042" "24219022671055626321111109370544217506941658960408" "07198403850962455444362981230987879927244284909188" "84580156166097919133875499200524063689912560717606" "05886116467109405077541002256983155200055935729725" "71636269561882670428252483600823257530420752963450" ) def a_ ( _lowerCAmelCase : str = N ): '''simple docstring''' return max( # mypy cannot properly interpret reduce int(reduce(lambda _lowerCAmelCase , _lowerCAmelCase : str(int(_lowerCAmelCase ) * int(_lowerCAmelCase ) ) , n[i : i + 13] ) ) for i in range(len(_lowerCAmelCase ) - 12 ) ) if __name__ == "__main__": print(f'''{solution() = }''')
code_codestyle: 645
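The `code` sample above is Project Euler #8: the largest product of 13 adjacent digits in a 1000-digit constant. The same idea with `math.prod` in place of `reduce`, demonstrated on a short string:

```python
from math import prod


def largest_adjacent_product(digits: str, window: int = 13) -> int:
    """Largest product of `window` adjacent digits in `digits`."""
    return max(
        prod(int(d) for d in digits[i : i + window])
        for i in range(len(digits) - window + 1)
    )


print(largest_adjacent_product("123456789", window=3))  # 504 = 7 * 8 * 9
```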
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch _UpperCamelCase : int = logging.get_logger(__name__) @dataclass class UpperCAmelCase_ : def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple: lowercase__ : str = load_in_abit lowercase__ : str = load_in_abit lowercase__ : List[str] = llm_inta_threshold lowercase__ : Dict = llm_inta_skip_modules lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload lowercase__ : Any = llm_inta_has_fpaa_weight lowercase__ : Any = bnb_abit_quant_type lowercase__ : Dict = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: lowercase__ : Dict = torch.floataa elif isinstance(a , a ): lowercase__ : Any = getattr(a , a ) elif isinstance(a , torch.dtype ): lowercase__ : Any = bnb_abit_compute_dtype else: raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' ) self.post_init() def _UpperCAmelCase ( self ) -> str: if not isinstance(self.llm_inta_threshold , a ): raise ValueError('llm_int8_threshold must be a float' ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ): raise ValueError('llm_int8_skip_modules must be a list of strings' ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ): raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' ) if not isinstance(self.llm_inta_has_fpaa_weight , a ): raise ValueError('llm_int8_has_fp16_weight must be a boolean' ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' ) if not isinstance(self.bnb_abit_quant_type , a ): raise ValueError('bnb_4bit_quant_type must be a string' ) if not isinstance(self.bnb_abit_use_double_quant , a ): raise ValueError('bnb_4bit_use_double_quant must be a boolean' ) if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse( '0.39.0' ): raise ValueError( '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' ) def _UpperCAmelCase ( self ) -> Tuple: return self.load_in_abit or self.load_in_abit def _UpperCAmelCase ( self ) -> List[str]: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]: lowercase__ : List[Any] = cls(**a ) lowercase__ : Union[str, Any] = [] for key, value in kwargs.items(): if hasattr(a , a ): setattr(a , a , a ) to_remove.append(a ) for key in to_remove: kwargs.pop(a , a ) if 
return_unused_kwargs: return config, kwargs else: return config def _UpperCAmelCase ( self , a ) -> Dict: with open(a , 'w' , encoding='utf-8' ) as writer: lowercase__ : Any = self.to_dict() lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n' writer.write(a ) def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1] return output def __repr__( self ) -> Dict: return f"""{self.__class__.__name__} {self.to_json_string()}""" def _UpperCAmelCase ( self , a = True ) -> str: if use_diff is True: lowercase__ : List[Any] = self.to_diff_dict() else: lowercase__ : List[str] = self.to_dict() return json.dumps(a , indent=2 , sort_keys=a ) + "\n" def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Tuple = self.to_dict() # get the default config dict lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict() lowercase__ : int = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: lowercase__ : Optional[int] = value return serializable_config_dict
style_context_codestyle: 645
label: 1
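The `style_context` sample above is a `BitsAndBytesConfig`-style quantization config. A hedged usage sketch for the real `transformers` class (requires the `bitsandbytes` package and a CUDA device at load time); the model id is a placeholder:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # 4-bit weights
    bnb_4bit_quant_type="nf4",              # "fp4" or "nf4", as validated above
    bnb_4bit_compute_dtype=torch.bfloat16,  # compute dtype for matmuls
    bnb_4bit_use_double_quant=True,         # quantize the quantization constants
)

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",                    # placeholder model id
    quantization_config=bnb_config,
    device_map="auto",
)
print(bnb_config)  # serialized via the to_json_string() path shown in the sample
```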
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType _UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _a): lowerCamelCase__ : str = "vision-encoder-decoder" lowerCamelCase__ : Tuple = True def __init__( self , **a ) -> Any: super().__init__(**a ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f"""A configuraton of type {self.model_type} cannot be instantiated because """ f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" ) lowercase__ : int = kwargs.pop('encoder' ) lowercase__ : Optional[Any] = encoder_config.pop('model_type' ) lowercase__ : Dict = kwargs.pop('decoder' ) lowercase__ : Dict = decoder_config.pop('model_type' ) lowercase__ : Optional[Any] = AutoConfig.for_model(a , **a ) lowercase__ : str = AutoConfig.for_model(a , **a ) lowercase__ : List[str] = True @classmethod def _UpperCAmelCase ( cls , a , a , **a ) -> PretrainedConfig: logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' ) lowercase__ : Optional[Any] = True lowercase__ : str = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **a ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase__ : List[Any] = self.encoder.to_dict() lowercase__ : Optional[int] = self.decoder.to_dict() lowercase__ : List[Any] = self.__class__.model_type return output class UpperCAmelCase_ ( _a): lowerCamelCase__ : int = version.parse("1.11") @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _UpperCAmelCase ( self ) -> float: return 1e-4 @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} ) class UpperCAmelCase_ ( _a): @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: lowercase__ : Tuple = OrderedDict() lowercase__ : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'} lowercase__ : Union[str, Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'} lowercase__ : Optional[int] = {0: 'batch', 1: 'encoder_sequence'} return common_inputs def _UpperCAmelCase ( self , a , a = -1 , a = -1 , a = False , a = None , ) -> Mapping[str, Any]: import torch lowercase__ : List[Any] = OrderedDict() lowercase__ : Dict = super().generate_dummy_inputs( a , batch_size=a , seq_length=a , is_pair=a , framework=a ) lowercase__ , lowercase__ : Dict = dummy_input['input_ids'].shape lowercase__ : int = (batch, encoder_sequence, self._config.encoder_hidden_size) lowercase__ : Tuple = dummy_input.pop('input_ids' ) lowercase__ : List[Any] = dummy_input.pop('attention_mask' ) lowercase__ : str = torch.zeros(a ) return common_inputs class UpperCAmelCase_ ( _a): @property def _UpperCAmelCase ( self ) -> None: pass def _UpperCAmelCase ( self , a ) -> OnnxConfig: return VisionEncoderDecoderEncoderOnnxConfig(a ) def _UpperCAmelCase ( self , a , a , a = "default" ) -> OnnxConfig: lowercase__ : Dict = encoder_config.hidden_size return 
VisionEncoderDecoderDecoderOnnxConfig(a , a )
code_codestyle: 645
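The `code` sample above defines the vision-encoder-decoder composite config and its `from_encoder_decoder_configs` classmethod. A minimal sketch of composing one from two sub-configs:

```python
from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(
    ViTConfig(), GPT2Config()
)
print(config.model_type)                   # "vision-encoder-decoder"
print(config.decoder.is_decoder)           # True, set by the classmethod
print(config.decoder.add_cross_attention)  # True
```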
"""simple docstring""" import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _UpperCamelCase : int = 16 _UpperCamelCase : Union[str, Any] = 32 def a_ ( _lowerCAmelCase : Tuple ): '''simple docstring''' return int(x / 2**20 ) class UpperCAmelCase_ : def __enter__( self ) -> Union[str, Any]: gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowercase__ : List[str] = torch.cuda.memory_allocated() return self def __exit__( self , *a ) -> Any: gc.collect() torch.cuda.empty_cache() lowercase__ : Optional[Any] = torch.cuda.memory_allocated() lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated() lowercase__ : List[Any] = bamb(self.end - self.begin ) lowercase__ : List[Any] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ): '''simple docstring''' lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) lowercase__ : Union[str, Any] = load_dataset( 'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} ) def tokenize_function(_lowerCAmelCase : int ): # max_length=None => use the model max length (it's actually the default) lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ : Union[str, Any] = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowerCAmelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
lowercase__ : Dict = DataLoader( tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) lowercase__ : Dict = DataLoader( tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Optional[int] = config['lr'] lowercase__ : Optional[Any] = int(config['num_epochs'] ) lowercase__ : Optional[Any] = int(config['seed'] ) lowercase__ : int = int(config['batch_size'] ) lowercase__ : Union[str, Any] = args.model_name_or_path set_seed(_lowerCAmelCase ) lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase ) # Instantiate optimizer lowercase__ : List[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase ) if accelerator.state.deepspeed_plugin is not None: lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: lowercase__ : List[Any] = 1 lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , ) else: lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # We need to keep track of how many total steps we have iterated over lowercase__ : Optional[int] = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ : Tuple = 0 # Now we train the model lowercase__ : Optional[Any] = {} for epoch in range(_lowerCAmelCase , _lowerCAmelCase ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_lowerCAmelCase ): lowercase__ : List[Any] = model(**_lowerCAmelCase ) lowercase__ : Dict = outputs.loss lowercase__ : int = loss / gradient_accumulation_steps accelerator.backward(_lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def a_ ( ): '''simple docstring''' lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , ) parser.add_argument( '--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , ) lowercase__ : Any = parser.parse_args() lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
style_context_codestyle: 645
label: 1
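The `style_context` sample above tracks peak GPU memory per epoch with a `TorchTracemalloc` context manager. A standalone sketch of that pattern, using `reset_peak_memory_stats` (the current name for the deprecated `reset_max_memory_allocated` the sample calls):

```python
import gc

import torch


def to_mb(n_bytes: int) -> int:
    return int(n_bytes / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_peak_memory_stats()  # zero the peak gauge
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = to_mb(self.end - self.begin)     # net allocation delta, MB
        self.peaked = to_mb(self.peak - self.begin)  # peak above the baseline, MB


# Usage:
# with TorchTracemalloc() as tm:
#     train_one_epoch()  # hypothetical training step
# print(tm.used, tm.peaked)
```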
"""simple docstring""" def a_ ( _lowerCAmelCase : int ): '''simple docstring''' if not isinstance(_lowerCAmelCase , _lowerCAmelCase ): lowercase__ : Any = f"""Input value of [number={number}] must be an integer""" raise TypeError(_lowerCAmelCase ) if number < 1: lowercase__ : List[Any] = f"""Input value of [number={number}] must be > 0""" raise ValueError(_lowerCAmelCase ) lowercase__ : List[Any] = 1 for i in range(1 , _lowerCAmelCase ): current_number *= 4 * i - 2 current_number //= i + 1 return current_number if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 645
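The `code` sample above computes Catalan numbers via the recurrence C(i) = C(i−1) · (4i − 2) / (i + 1), where the division is always exact. A readable sketch:

```python
def catalan(number: int) -> int:
    """Return the `number`-th Catalan number, 1-indexed: 1, 1, 2, 5, 14, ..."""
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError(f"Input value of [number={number}] must be > 0")
    current = 1
    for i in range(1, number):
        current = current * (4 * i - 2) // (i + 1)  # exact integer division
    return current


print([catalan(n) for n in range(1, 6)])  # [1, 1, 2, 5, 14]
```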
"""simple docstring""" def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Any = [0] * len(_lowerCAmelCase ) for i in range(1 , len(_lowerCAmelCase ) ): # use last results for better performance - dynamic programming lowercase__ : List[str] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: lowercase__ : Dict = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 lowercase__ : Union[str, Any] = j return prefix_result def a_ ( _lowerCAmelCase : str ): '''simple docstring''' return max(prefix_function(_lowerCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 645
label: 1
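The `style_context` sample above is the KMP prefix function. A readable sketch; `pi[i]` is the length of the longest proper prefix of `s[: i + 1]` that is also its suffix:

```python
def prefix_function(s: str) -> list[int]:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]              # reuse the previous result (dynamic programming)
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]          # fall back to a shorter border
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi


print(prefix_function("aabcdaabc"))       # [0, 1, 0, 0, 0, 1, 2, 3, 4]
print(max(prefix_function("aabcdaabc")))  # 4: longest border over all prefixes
```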
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : List[str] = KandinskyVaaControlnetPipeline lowerCamelCase__ : List[str] = ["image_embeds", "negative_image_embeds", "hint"] lowerCamelCase__ : Tuple = ["image_embeds", "negative_image_embeds", "hint"] lowerCamelCase__ : List[str] = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] lowerCamelCase__ : Optional[Any] = False @property def _UpperCAmelCase ( self ) -> Union[str, Any]: return 3_2 @property def _UpperCAmelCase ( self ) -> Optional[int]: return 3_2 @property def _UpperCAmelCase ( self ) -> Union[str, Any]: return self.time_input_dim @property def _UpperCAmelCase ( self ) -> Union[str, Any]: return self.time_input_dim * 4 @property def _UpperCAmelCase ( self ) -> Any: return 1_0_0 @property def _UpperCAmelCase ( self ) -> str: torch.manual_seed(0 ) lowercase__ : List[str] = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } lowercase__ : List[Any] = UNetaDConditionModel(**a ) return model @property def _UpperCAmelCase ( self ) -> List[Any]: return { "block_out_channels": [3_2, 3_2, 6_4, 6_4], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _UpperCAmelCase ( self ) -> str: torch.manual_seed(0 ) lowercase__ : Tuple = VQModel(**self.dummy_movq_kwargs ) return model def _UpperCAmelCase ( self ) -> int: lowercase__ : Tuple = self.dummy_unet lowercase__ : Tuple = self.dummy_movq lowercase__ : Dict = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule='linear' , beta_start=0.00_085 , beta_end=0.012 , clip_sample=a , set_alpha_to_one=a , steps_offset=1 , prediction_type='epsilon' , thresholding=a , ) lowercase__ : Any = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def _UpperCAmelCase ( self , a , a=0 ) -> List[str]: lowercase__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(a ) ).to(a ) lowercase__ : Any = floats_tensor((1, 
self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( a ) # create hint lowercase__ : Union[str, Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(a ) ).to(a ) if str(a ).startswith('mps' ): lowercase__ : str = torch.manual_seed(a ) else: lowercase__ : str = torch.Generator(device=a ).manual_seed(a ) lowercase__ : List[Any] = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 6_4, 'width': 6_4, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : List[str] = 'cpu' lowercase__ : Any = self.get_dummy_components() lowercase__ : int = self.pipeline_class(**a ) lowercase__ : str = pipe.to(a ) pipe.set_progress_bar_config(disable=a ) lowercase__ : Tuple = pipe(**self.get_dummy_inputs(a ) ) lowercase__ : Optional[Any] = output.images lowercase__ : Dict = pipe( **self.get_dummy_inputs(a ) , return_dict=a , )[0] lowercase__ : Dict = image[0, -3:, -3:, -1] lowercase__ : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) lowercase__ : str = np.array( [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' ) lowercase__ : Tuple = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png' ) lowercase__ : Optional[int] = torch.from_numpy(np.array(a ) ).float() / 255.0 lowercase__ : str = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) lowercase__ : str = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(a ) lowercase__ : Optional[Any] = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa ) lowercase__ : str = pipeline.to(a ) pipeline.set_progress_bar_config(disable=a ) lowercase__ : Optional[int] = 'A robot, 4k photo' lowercase__ : int = torch.Generator(device='cuda' ).manual_seed(0 ) lowercase__ , lowercase__ : Optional[int] = pipe_prior( a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple() lowercase__ : Tuple = torch.Generator(device='cuda' ).manual_seed(0 ) lowercase__ : Dict = pipeline( image_embeds=a , negative_image_embeds=a , hint=a , generator=a , num_inference_steps=1_0_0 , output_type='np' , ) lowercase__ : Dict = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(a , a )
code_codestyle: 645
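The `code` sample above tests the two-stage Kandinsky 2.2 controlnet flow. A hedged sketch using the current `diffusers` class names (the sample's mangled `KandinskyVaaControlnetPipeline` corresponds to `KandinskyV22ControlnetPipeline`); the depth hint here is a stand-in tensor, not a real depth map:

```python
import torch
from diffusers import KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyV22ControlnetPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
).to("cuda")

# Stage 1: text -> image embeddings.
image_embeds, negative_image_embeds = prior(
    "A robot, 4k photo", num_inference_steps=5, negative_prompt=""
).to_tuple()

# Stage 2: embeddings + depth hint -> image. A real depth map would go here.
hint = torch.rand(1, 3, 512, 512, dtype=torch.float16, device="cuda")
image = pipe(
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    hint=hint,
    num_inference_steps=100,
    output_type="np",
).images[0]
```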
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]: lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0} lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} lowercase__ : Optional[int] = parent lowercase__ : Optional[int] = batch_size lowercase__ : str = num_channels lowercase__ : Any = image_size lowercase__ : Optional[Any] = min_resolution lowercase__ : int = max_resolution lowercase__ : List[Any] = do_resize lowercase__ : List[str] = size lowercase__ : str = do_center_crop lowercase__ : List[Any] = crop_size lowercase__ : Union[str, Any] = do_flip_channel_order def _UpperCAmelCase ( self ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Tuple = MobileViTImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_center_crop' ) ) self.assertTrue(hasattr(a , 'center_crop' ) ) self.assertTrue(hasattr(a , 'do_flip_channel_order' ) ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 2_0} ) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} ) lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> str: # Initialize image_processing lowercase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Tuple: # Initialize image_processing lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Dict: # Initialize image_processing lowercase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
style_context_codestyle: 645
label: 1
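The `style_context` sample above tests `MobileViTImageProcessor`, whose defaults resize, center-crop, and flip RGB to BGR. A brief usage sketch:

```python
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
image = Image.new("RGB", (400, 300))  # stand-in input
pixel_values = processor(images=image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # e.g. torch.Size([1, 3, 256, 256]) with that checkpoint's defaults
```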
"""simple docstring""" def a_ ( _lowerCAmelCase : list ): '''simple docstring''' lowercase__ : str = False while is_sorted is False: # Until all the indices are traversed keep looping lowercase__ : int = True for i in range(0 , len(_lowerCAmelCase ) - 1 , 2 ): # iterating over all even indices if input_list[i] > input_list[i + 1]: lowercase__ , lowercase__ : int = input_list[i + 1], input_list[i] # swapping if elements not in order lowercase__ : Any = False for i in range(1 , len(_lowerCAmelCase ) - 1 , 2 ): # iterating over all odd indices if input_list[i] > input_list[i + 1]: lowercase__ , lowercase__ : Union[str, Any] = input_list[i + 1], input_list[i] # swapping if elements not in order lowercase__ : Union[str, Any] = False return input_list if __name__ == "__main__": print("Enter list to be sorted") _UpperCamelCase : List[Any] = [int(x) for x in input().split()] # inputing elements of the list in one line _UpperCamelCase : Tuple = odd_even_sort(input_list) print("The sorted list is") print(sorted_list)
code_codestyle: 645
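The `code` sample above is odd-even transposition sort (brick sort): alternating compare-swap passes over even-indexed and odd-indexed pairs until a full sweep makes no swap. A readable sketch:

```python
def odd_even_sort(values: list) -> list:
    is_sorted = False
    while not is_sorted:
        is_sorted = True
        for start in (0, 1):  # even pass, then odd pass
            for i in range(start, len(values) - 1, 2):
                if values[i] > values[i + 1]:
                    values[i], values[i + 1] = values[i + 1], values[i]
                    is_sorted = False
    return values


print(odd_even_sort([5, 4, 3, 2, 1]))  # [1, 2, 3, 4, 5]
```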
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict: lowercase__ : Optional[Any] = parent lowercase__ : Dict = batch_size lowercase__ : List[Any] = seq_length lowercase__ : int = is_training lowercase__ : str = use_attention_mask lowercase__ : Dict = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : Tuple = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : int = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Dict = hidden_dropout_prob lowercase__ : Tuple = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : int = type_vocab_size lowercase__ : List[str] = type_sequence_label_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : Optional[int] = num_choices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_attention_mask: lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : List[str] = None if self.use_token_type_ids: lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : Any = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCAmelCase ( self ) -> Any: lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self ) @slow def _UpperCAmelCase ( self ) -> str: for model_class_name in self.all_model_classes: lowercase__ : str = 
model_class_name.from_pretrained('albert-base-v2' ) lowercase__ : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(a ) @require_flax class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' ) lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase__ : Any = model(a , attention_mask=a )[0] lowercase__ : Tuple = (1, 1_1, 7_6_8) self.assertEqual(output.shape , a ) lowercase__ : Optional[Any] = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
style_context_codestyle: 645
label: 1
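The `style_context` sample above ends with a Flax ALBERT integration check. The same forward pass, minimally:

```python
import numpy as np
from transformers import FlaxAlbertModel

model = FlaxAlbertModel.from_pretrained("albert-base-v2")
input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
attention_mask = np.ones_like(input_ids)
output = model(input_ids, attention_mask=attention_mask)[0]
print(output.shape)  # (1, 11, 768): the last hidden state
```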
"""simple docstring""" from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=2 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> List[str]: lowercase__ : Optional[Any] = parent lowercase__ : List[Any] = 1_3 lowercase__ : Union[str, Any] = 7 lowercase__ : List[str] = True lowercase__ : Dict = True lowercase__ : Tuple = True lowercase__ : str = True lowercase__ : Dict = 9_9 lowercase__ : Any = 3_2 lowercase__ : Dict = 2 lowercase__ : int = 4 lowercase__ : Optional[Any] = 3_7 lowercase__ : Any = 'gelu' lowercase__ : Optional[int] = 0.1 lowercase__ : Optional[int] = 0.1 lowercase__ : Dict = 5_1_2 lowercase__ : Union[str, Any] = 1_6 lowercase__ : Tuple = 2 lowercase__ : Dict = 0.02 lowercase__ : Any = 3 lowercase__ : Optional[int] = 4 lowercase__ : str = None def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Union[str, Any] = None if self.use_input_mask: lowercase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Tuple = None if self.use_token_type_ids: lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : int = None lowercase__ : Any = None lowercase__ : Tuple = None if self.use_labels: lowercase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : Union[str, Any] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=a , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> Optional[int]: lowercase__ : str = TFRoFormerModel(config=a ) lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} lowercase__ : Union[str, Any] = [input_ids, input_mask] lowercase__ : Optional[int] = model(a ) lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def 
_UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> Optional[Any]: lowercase__ : Optional[Any] = True lowercase__ : Any = TFRoFormerForCausalLM(config=a ) lowercase__ : Dict = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } lowercase__ : Optional[int] = model(a )['logits'] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> List[str]: lowercase__ : Tuple = TFRoFormerForMaskedLM(config=a ) lowercase__ : Optional[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } lowercase__ : Any = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> int: lowercase__ : Optional[Any] = self.num_labels lowercase__ : Dict = TFRoFormerForSequenceClassification(config=a ) lowercase__ : str = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } lowercase__ : int = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> str: lowercase__ : Optional[int] = self.num_choices lowercase__ : Tuple = TFRoFormerForMultipleChoice(config=a ) lowercase__ : str = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) ) lowercase__ : Optional[Any] = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) ) lowercase__ : Tuple = tf.tile(tf.expand_dims(a , 1 ) , (1, self.num_choices, 1) ) lowercase__ : Union[str, Any] = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } lowercase__ : Union[str, Any] = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> List[str]: lowercase__ : Any = self.num_labels lowercase__ : Dict = TFRoFormerForTokenClassification(config=a ) lowercase__ : Tuple = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } lowercase__ : int = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a , a ) -> Dict: lowercase__ : Optional[int] = TFRoFormerForQuestionAnswering(config=a ) lowercase__ : List[Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } lowercase__ : Dict = model(a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Tuple = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : List[Any] = config_and_inputs lowercase__ : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : List[Any] = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if 
is_tf_available() else () ) lowerCamelCase__ : List[str] = ( { "feature-extraction": TFRoFormerModel, "fill-mask": TFRoFormerForMaskedLM, "question-answering": TFRoFormerForQuestionAnswering, "text-classification": TFRoFormerForSequenceClassification, "text-generation": TFRoFormerForCausalLM, "token-classification": TFRoFormerForTokenClassification, "zero-shot": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : Any = False def _UpperCAmelCase ( self , a , a , a , a , a ) -> Union[str, Any]: if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Optional[int] = TFRoFormerModelTester(self ) lowercase__ : List[Any] = ConfigTester(self , config_class=a , hidden_size=3_7 ) def _UpperCAmelCase ( self ) -> Dict: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> str: lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*a ) @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : List[str] = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' ) self.assertIsNotNone(a ) @require_tf class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> Any: lowercase__ : List[Any] = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' ) lowercase__ : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) lowercase__ : Optional[int] = model(a )[0] # TODO Replace vocab size lowercase__ : Any = 5_0_0_0_0 lowercase__ : List[Any] = [1, 6, vocab_size] self.assertEqual(output.shape , a ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
lowercase__ : Optional[Any] = tf.constant( [ [ [-0.12_053_341, -1.0_264_901, 0.29_221_946], [-1.5_133_783, 0.197_433, 0.15_190_607], [-5.0_135_403, -3.900_256, -0.84_038_764], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , a , atol=1e-4 ) @require_tf class UpperCAmelCase_ ( unittest.TestCase): lowerCamelCase__ : List[str] = 1E-4 def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Union[str, Any] = tf.constant([[4, 1_0]] ) lowercase__ : int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) lowercase__ : List[str] = emba(input_ids.shape ) lowercase__ : Optional[Any] = tf.constant( [[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] ) tf.debugging.assert_near(a , a , atol=self.tolerance ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : Optional[Any] = tf.constant( [ [0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000], [0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617], [0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870], ] ) lowercase__ : Dict = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2 ) emba([2, 1_6, 5_1_2] ) lowercase__ : Any = emba.weight[:3, :5] tf.debugging.assert_near(a , a , atol=self.tolerance ) @require_tf class UpperCAmelCase_ ( unittest.TestCase): lowerCamelCase__ : Union[str, Any] = 1E-4 def _UpperCAmelCase ( self ) -> Any: # 2,12,16,64 lowercase__ : List[Any] = tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 lowercase__ : int = -tf.reshape(tf.range(2 * 1_2 * 1_6 * 6_4 , dtype=tf.floataa ) , shape=(2, 1_2, 1_6, 6_4) ) / 1_0_0 lowercase__ : List[Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=3_2 , embedding_dim=6_4 ) lowercase__ : str = embed_positions([2, 1_6, 7_6_8] )[None, None, :, :] lowercase__ , lowercase__ : List[str] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( a , a , a ) lowercase__ : List[Any] = tf.constant( [ [0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700], [-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343], [-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985], [-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871], [0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980], [3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253], ] ) lowercase__ : List[Any] = tf.constant( [ [0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700], [0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343], [1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985], [2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871], [-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980], [-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , a , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , a , atol=self.tolerance )
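As a reference for what the expected tensors above encode: with the sinusoidal table laid out as [sin half | cos half], rotary position embeddings rotate each consecutive (even, odd) feature pair by a position-dependent angle. A minimal NumPy sketch of that update follows; it is illustrative only, not the transformers implementation, and apply_rotary is a hypothetical name.

import numpy as np


def apply_rotary(x: np.ndarray, sinusoidal: np.ndarray) -> np.ndarray:
    # x: (seq_len, dim) queries or keys; sinusoidal: (seq_len, dim) table with
    # sin values in the first dim/2 columns and cos values in the last dim/2.
    sin, cos = np.split(sinusoidal, 2, axis=-1)
    # Duplicate every entry so sin/cos align with interleaved (even, odd) pairs.
    sin_pos = np.repeat(sin, 2, axis=-1)
    cos_pos = np.repeat(cos, 2, axis=-1)
    # rotate_half: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)
    rotated = np.stack([-x[..., 1::2], x[..., 0::2]], axis=-1).reshape(x.shape)
    return x * cos_pos + rotated * sin_pos


# Position 0 has sin=0 and cos=1, so the first row passes through unchanged,
# which matches the first rows of the expected query/key tensors in the test.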
"""simple docstring""" from collections.abc import Sequence def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) ) def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' lowercase__ : int = 0.0 for coeff in reversed(_lowerCAmelCase ): lowercase__ : List[Any] = result * x + coeff return result if __name__ == "__main__": _UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0) _UpperCamelCase : Dict = 1_0.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _UpperCamelCase : Any = logging.get_logger(__name__) _UpperCamelCase : Union[str, Any] = { "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json", "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json", "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json", "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json", "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json", "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json", } class UpperCAmelCase_ ( _a): lowerCamelCase__ : int = "roberta" def __init__( self , a=5_0_2_6_5 , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=2 , a=0.02 , a=1e-12 , a=1 , a=0 , a=2 , a="absolute" , a=True , a=None , **a , ) -> Optional[Any]: super().__init__(pad_token_id=a , bos_token_id=a , eos_token_id=a , **a ) lowercase__ : str = vocab_size lowercase__ : str = hidden_size lowercase__ : str = num_hidden_layers lowercase__ : Any = num_attention_heads lowercase__ : Dict = hidden_act lowercase__ : List[str] = intermediate_size lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Optional[Any] = max_position_embeddings lowercase__ : Tuple = type_vocab_size lowercase__ : List[Any] = initializer_range lowercase__ : Any = layer_norm_eps lowercase__ : Optional[int] = position_embedding_type lowercase__ : Union[str, Any] = use_cache lowercase__ : List[Any] = classifier_dropout class UpperCAmelCase_ ( _a): @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": lowercase__ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowercase__ : Any = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _UpperCamelCase : Any = [ {"dataset": "wikipedia", "config_name": "20220301.de"}, {"dataset": "wikipedia", "config_name": "20220301.en"}, {"dataset": "wikipedia", "config_name": "20220301.fr"}, {"dataset": "wikipedia", "config_name": "20220301.frr"}, {"dataset": "wikipedia", "config_name": "20220301.it"}, {"dataset": "wikipedia", "config_name": "20220301.simple"}, {"dataset": "snli", "config_name": "plain_text"}, {"dataset": "eli5", "config_name": "LFQA_reddit"}, {"dataset": "wiki40b", "config_name": "en"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, {"dataset": "natural_questions", "config_name": "default"}, ] def a_ ( _lowerCAmelCase : Optional[Any]=True ): '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a)) class UpperCAmelCase_ ( _a): lowerCamelCase__ : str = None lowerCamelCase__ : Optional[Any] = None def _UpperCAmelCase ( self , a , a ) -> List[Any]: with TemporaryDirectory() as tmp_dir: lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a ) lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=a , config_name=a , hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a ) self.assertTrue(os.path.exists(a ) ) @pytest.mark.integration def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : Optional[int] = import_main_class(dataset_module.module_path ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam lowercase__ : Optional[int] = None builder_instance.download_and_prepare() lowercase__ : Optional[int] = builder_instance.as_dataset() assert ds @pytest.mark.integration def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , 
hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset() assert ds assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert "train" in ds assert isinstance(ds['train'] , _lowerCAmelCase ) assert next(iter(ds['train'] ) )
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("3.8"): import importlib_metadata else: import importlib.metadata as importlib_metadata def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any]=False ): '''simple docstring''' try: lowercase__ : Tuple = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : Tuple = default else: # KEY is set, convert it to True or False. try: lowercase__ : Dict = strtobool(SCREAMING_SNAKE_CASE_ ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f"""If set, {key} must be yes or no.""" ) return _value _UpperCamelCase : Union[str, Any] = parse_flag_from_env("RUN_SLOW", default=False) _UpperCamelCase : int = parse_flag_from_env("RUN_REMOTE", default=False) _UpperCamelCase : List[Any] = parse_flag_from_env("RUN_LOCAL", default=True) _UpperCamelCase : List[Any] = parse_flag_from_env("RUN_PACKAGED", default=True) # Compression _UpperCamelCase : List[str] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4") _UpperCamelCase : int = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr") _UpperCamelCase : List[str] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard") # Audio _UpperCamelCase : List[Any] = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"), reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ", ) # Beam _UpperCamelCase : str = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"), reason="test requires apache-beam and a compatible dill version", ) # Dill-cloudpickle compatibility _UpperCamelCase : Optional[int] = pytest.mark.skipif( config.DILL_VERSION <= version.parse("0.3.2"), reason="test requires dill>0.3.2 for cloudpickle compatibility", ) # Windows _UpperCamelCase : List[str] = pytest.mark.skipif( sys.platform == "win32", reason="test should not be run on Windows", ) def a_ ( _lowerCAmelCase : Optional[int] ): '''simple docstring''' try: import faiss # noqa except ImportError: lowercase__ : Optional[Any] = unittest.skip('test requires faiss' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : Tuple ): '''simple docstring''' try: import regex # noqa except ImportError: lowercase__ : Dict = unittest.skip('test requires regex' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : Optional[int] ): '''simple docstring''' try: import elasticsearch # noqa except ImportError: lowercase__ : Optional[int] = unittest.skip('test requires elasticsearch' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : Optional[int] ): '''simple docstring''' try: import sqlalchemy # noqa except ImportError: lowercase__ : List[Any] = unittest.skip('test requires sqlalchemy' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' if not config.TORCH_AVAILABLE: lowercase__ : Tuple = unittest.skip('test 
requires PyTorch' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : Any ): '''simple docstring''' if not config.TF_AVAILABLE: lowercase__ : Dict = unittest.skip('test requires TensorFlow' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : List[str] ): '''simple docstring''' if not config.JAX_AVAILABLE: lowercase__ : str = unittest.skip('test requires JAX' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' if not config.PIL_AVAILABLE: lowercase__ : Optional[int] = unittest.skip('test requires Pillow' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : List[str] ): '''simple docstring''' try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def a_ ( _lowerCAmelCase : List[Any] ): '''simple docstring''' try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def a_ ( _lowerCAmelCase : Optional[int] ): '''simple docstring''' try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def a_ ( _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' def _require_spacy_model(_lowerCAmelCase : Optional[Any] ): try: import spacy # noqa F401 spacy.load(SCREAMING_SNAKE_CASE_ ) except ImportError: return unittest.skip('test requires spacy' )(SCREAMING_SNAKE_CASE_ ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(SCREAMING_SNAKE_CASE_ ) )(SCREAMING_SNAKE_CASE_ ) else: return test_case return _require_spacy_model def a_ ( _lowerCAmelCase : int ): '''simple docstring''' try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def a_ ( _lowerCAmelCase : Optional[int] ): '''simple docstring''' try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(SCREAMING_SNAKE_CASE_ ) else: return test_case def a_ ( _lowerCAmelCase : Dict ): '''simple docstring''' if not _run_slow_tests or _run_slow_tests == 0: lowercase__ : List[str] = unittest.skip('test is slow' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : List[str] ): '''simple docstring''' if not _run_local_tests or _run_local_tests == 0: lowercase__ : List[Any] = unittest.skip('test is local' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' if not _run_packaged_tests or _run_packaged_tests == 0: lowercase__ : List[str] = unittest.skip('test is packaged' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( _lowerCAmelCase : Tuple ): '''simple docstring''' if not _run_remote_tests or _run_remote_tests == 0: lowercase__ : List[str] = unittest.skip('test requires remote' )(SCREAMING_SNAKE_CASE_ ) return test_case def a_ ( *_lowerCAmelCase : List[Any] ): '''simple docstring''' def decorate(cls : Tuple ): for name, fn in cls.__dict__.items(): if callable(SCREAMING_SNAKE_CASE_ ) and name.startswith('test' ): for decorator in decorators: lowercase__ : Union[str, Any] = decorator(SCREAMING_SNAKE_CASE_ ) setattr(cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return cls return decorate class UpperCAmelCase_ ( _lowerCamelCase): pass class UpperCAmelCase_ ( _lowerCamelCase): lowerCamelCase__ : Union[str, Any] = 0 
lowerCamelCase__ : Optional[int] = 1 lowerCamelCase__ : int = 2 @contextmanager def a_ ( _lowerCAmelCase : List[Any]=OfflineSimulationMode.CONNECTION_FAILS , _lowerCAmelCase : int=1E-16 ): '''simple docstring''' lowercase__ : str = requests.Session().request def timeout_request(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , **_lowerCAmelCase : Tuple ): # Change the url to an invalid url so that the connection hangs lowercase__ : Any = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( f"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" ) lowercase__ : Any = timeout try: return online_request(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier lowercase__ : int = url lowercase__ : Union[str, Any] = e.args[0] lowercase__ : str = (max_retry_error.args[0].replace('10.255.255.1' , f"""OfflineMock[{url}]""" ),) lowercase__ : int = (max_retry_error,) raise def raise_connection_error(_lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , **_lowerCAmelCase : int ): raise requests.ConnectionError('Offline mode is enabled.' , request=SCREAMING_SNAKE_CASE_ ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , SCREAMING_SNAKE_CASE_ ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , SCREAMING_SNAKE_CASE_ ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , SCREAMING_SNAKE_CASE_ ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def a_ ( *_lowerCAmelCase : Dict , **_lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Optional[Any] = str(Path().resolve() ) with tempfile.TemporaryDirectory(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) as tmp_dir: try: os.chdir(SCREAMING_SNAKE_CASE_ ) yield finally: os.chdir(SCREAMING_SNAKE_CASE_ ) @contextmanager def a_ ( ): '''simple docstring''' import gc gc.collect() lowercase__ : Union[str, Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def a_ ( ): '''simple docstring''' import gc gc.collect() lowercase__ : Any = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' return deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist() == deepcopy(SCREAMING_SNAKE_CASE_ ).integers(0 , 100 , 10 ).tolist() def a_ ( _lowerCAmelCase : List[str] ): '''simple docstring''' import decorator from requests.exceptions import HTTPError def _wrapper(_lowerCAmelCase : Any , *_lowerCAmelCase : Dict , **_lowerCAmelCase : Optional[int] ): try: return func(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) except HTTPError as err: if str(SCREAMING_SNAKE_CASE_ ).startswith('500' ) or str(SCREAMING_SNAKE_CASE_ ).startswith('502' ): pytest.xfail(str(SCREAMING_SNAKE_CASE_ ) ) raise err return decorator.decorator(_wrapper , SCREAMING_SNAKE_CASE_ ) class UpperCAmelCase_ : def __init__( self , a , a , a ) -> Optional[Any]: lowercase__ : int = returncode lowercase__ : List[str] = stdout lowercase__ : Tuple = stderr async def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] ): '''simple docstring''' while True: lowercase__ : Optional[int] = await stream.readline() if line: callback(SCREAMING_SNAKE_CASE_ ) else: break async def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : str=None , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Union[str, Any]=False ): '''simple docstring''' if echo: print('\nRunning: ' , ' '.join(SCREAMING_SNAKE_CASE_ ) ) lowercase__ : List[str] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=SCREAMING_SNAKE_CASE_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=SCREAMING_SNAKE_CASE_ , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : Tuple = [] lowercase__ : Union[str, Any] = [] def tee(_lowerCAmelCase : Tuple , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict="" ): lowercase__ : List[str] = line.decode('utf-8' ).rstrip() sink.append(SCREAMING_SNAKE_CASE_ ) if not quiet: print(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , file=SCREAMING_SNAKE_CASE_ ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _lowerCAmelCase : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda _lowerCAmelCase : tee(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , sys.stderr , label='stderr:' ) ), ] , timeout=SCREAMING_SNAKE_CASE_ , ) return _RunOutput(await p.wait() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[int]=180 , _lowerCAmelCase : Tuple=False , _lowerCAmelCase : Optional[int]=True ): '''simple docstring''' lowercase__ : Tuple = asyncio.get_event_loop() lowercase__ : Any = loop.run_until_complete( _stream_subprocess(SCREAMING_SNAKE_CASE_ , env=SCREAMING_SNAKE_CASE_ , stdin=SCREAMING_SNAKE_CASE_ , timeout=SCREAMING_SNAKE_CASE_ , quiet=SCREAMING_SNAKE_CASE_ , echo=SCREAMING_SNAKE_CASE_ ) ) lowercase__ : Union[str, Any] = ' '.join(SCREAMING_SNAKE_CASE_ ) if result.returncode > 0: lowercase__ : str = '\n'.join(result.stderr ) raise RuntimeError( f"""'{cmd_str}' failed with returncode {result.returncode}\n\n""" f"""The combined stderr from workers follows:\n{stderr}""" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f"""'{cmd_str}' produced no output.""" ) return result def a_ ( ): '''simple docstring''' lowercase__ : Optional[int] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) lowercase__ : List[str] = re.sub(R'^gw' , '' , SCREAMING_SNAKE_CASE_ , 0 , re.M ) return int(SCREAMING_SNAKE_CASE_ ) def a_ ( ): '''simple docstring''' lowercase__ : Optional[int] = 2_9500 lowercase__ : List[Any] = pytest_xdist_worker_id() return port + uniq_delta
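The offline-simulation context manager defined earlier in this file works by monkey-patching requests. A minimal self-contained sketch of the same idea follows; no_network is a hypothetical name, not this module's API.

from contextlib import contextmanager
from unittest.mock import patch

import requests


@contextmanager
def no_network():
    # Any HTTP request issued inside the block fails as if the machine were offline.
    def raise_connection_error(self, request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=request)

    with patch("requests.Session.send", raise_connection_error):
        yield


# Usage:
#     with no_network():
#         requests.get("https://example.com")  # raises requests.ConnectionError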
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def a_ ( _lowerCAmelCase : dict ): '''simple docstring''' return (data["data"], data["target"]) def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ): '''simple docstring''' lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(_lowerCAmelCase , _lowerCAmelCase ) # Predict target for test data lowercase__ : str = xgb.predict(_lowerCAmelCase ) lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 ) return predictions def a_ ( ): '''simple docstring''' lowercase__ : Optional[Any] = fetch_california_housing() lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split( _lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 ) lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Error printing print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _UpperCamelCase : Optional[Any] = { "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"], "tokenization_tapas": ["TapasTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Optional[Any] = [ "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TapasForMaskedLM", "TapasForQuestionAnswering", "TapasForSequenceClassification", "TapasModel", "TapasPreTrainedModel", "load_tf_weights_in_tapas", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Dict = [ "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TFTapasForMaskedLM", "TFTapasForQuestionAnswering", "TFTapasForSequenceClassification", "TFTapasModel", "TFTapasPreTrainedModel", ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys _UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]: lowercase__ : str = parent lowercase__ : int = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Dict = patch_size lowercase__ : Tuple = tubelet_size lowercase__ : Optional[int] = num_frames lowercase__ : Optional[int] = is_training lowercase__ : int = use_labels lowercase__ : Optional[int] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : Any = intermediate_size lowercase__ : str = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Union[str, Any] = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = mask_ratio lowercase__ : Optional[Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase__ : Optional[Any] = (image_size // patch_size) ** 2 lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase__ : str = int(mask_ratio * self.seq_length ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase__ : int = None if self.use_labels: lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Dict = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Tuple: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : Dict = VideoMAEModel(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( 
self , a , a , a ) -> Union[str, Any]: lowercase__ : str = VideoMAEForPreTraining(a ) model.to(a ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Any = torch.ones((self.num_masks,) ) lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool() lowercase__ : str = model(a , a ) # model only returns predictions for masked patches lowercase__ : str = mask.sum().item() lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowerCamelCase__ : Optional[int] = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) lowerCamelCase__ : Any = False lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : str = False def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = VideoMAEModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]: lowercase__ : Union[str, Any] = copy.deepcopy(a ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) ) lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase__ : Union[str, Any] = bool_masked_pos.to(a ) if return_labels: if model_class in [ *get_values(a ), ]: lowercase__ : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a ) return inputs_dict def _UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) lowercase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : int = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a ) self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> Optional[Any]: if not self.has_attentions: pass else: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = True for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Any = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase__ : Optional[Any] = True lowercase__ : int = False lowercase__ : Any = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Dict = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : List[str] = len(a ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : List[str] = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) ) self.assertEqual(out_len + 1 , len(a ) ) lowercase__ : int = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> Optional[int]: def check_hidden_states_output(a , a , a ): lowercase__ : Optional[int] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(a ) , a ) lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Union[str, Any] = True 
check_hidden_states_output(a , a , a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> List[Any]: pass def a_ ( ): '''simple docstring''' lowercase__ : int = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) lowercase__ : str = np.load(_lowerCAmelCase ) return list(_lowerCAmelCase ) @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Optional[Any]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to( a ) lowercase__ : str = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : Union[str, Any] = model(**a ) # verify the logits lowercase__ : str = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a ) lowercase__ : Optional[Any] = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a ) # add boolean mask, indicating which patches to mask lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) lowercase__ : str = torch.load(a ) # forward pass with torch.no_grad(): lowercase__ : List[Any] = model(**a ) # verify the logits lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] ) lowercase__ : List[str] = torch.tensor( [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a ) self.assertEqual(outputs.logits.shape , a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to( a ) with torch.no_grad(): lowercase__ : Any = model(**a ) lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
"""simple docstring""" import argparse import json import os import torch from transformers.file_utils import has_file from diffusers import UNetaDConditionModel, UNetaDModel _UpperCamelCase : Any = False _UpperCamelCase : Tuple = True _UpperCamelCase : Optional[Any] = False if __name__ == "__main__": _UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( "--repo_path", default=None, type=str, required=True, help="The config json file corresponding to the architecture.", ) parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") _UpperCamelCase : str = parser.parse_args() _UpperCamelCase : Union[str, Any] = { 'image_size': 'sample_size', 'num_res_blocks': 'layers_per_block', 'block_channels': 'block_out_channels', 'down_blocks': 'down_block_types', 'up_blocks': 'up_block_types', 'downscale_freq_shift': 'freq_shift', 'resnet_num_groups': 'norm_num_groups', 'resnet_act_fn': 'act_fn', 'resnet_eps': 'norm_eps', 'num_head_channels': 'attention_head_dim', } _UpperCamelCase : Optional[int] = { 'time_steps': 'time_proj', 'mid': 'mid_block', 'downsample_blocks': 'down_blocks', 'upsample_blocks': 'up_blocks', } _UpperCamelCase : Dict = '' if has_file(args.repo_path, "config.json") else 'unet' with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader: _UpperCamelCase : int = reader.read() _UpperCamelCase : Any = json.loads(text) if do_only_config: for key in config_parameters_to_change.keys(): config.pop(key, None) if has_file(args.repo_path, "config.json"): _UpperCamelCase : Tuple = UNetaDModel(**config) else: _UpperCamelCase : Tuple = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel _UpperCamelCase : Tuple = class_name(**config) if do_only_config: model.save_config(os.path.join(args.repo_path, subfolder)) _UpperCamelCase : str = dict(model.config) if do_only_renaming: for key, value in config_parameters_to_change.items(): if key in config: _UpperCamelCase : Union[str, Any] = config[key] del config[key] _UpperCamelCase : Optional[Any] = [k.replace("UNetRes", "") for k in config['down_block_types']] _UpperCamelCase : List[Any] = [k.replace("UNetRes", "") for k in config['up_block_types']] if do_only_weights: _UpperCamelCase : Optional[int] = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin")) _UpperCamelCase : Any = {} for param_key, param_value in state_dict.items(): if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"): continue _UpperCamelCase : int = False for key, new_key in key_parameters_to_change.items(): if not has_changed and param_key.split(".")[0] == key: _UpperCamelCase : List[str] = param_value _UpperCamelCase : List[Any] = True if not has_changed: _UpperCamelCase : Tuple = param_value model.load_state_dict(new_state_dict) model.save_pretrained(os.path.join(args.repo_path, subfolder))
"""simple docstring""" import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } _UpperCamelCase : List[str] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): '''simple docstring''' for attribute in key.split('.' ): lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: lowercase__ : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase__ : Optional[Any] = value elif weight_type == "weight_g": lowercase__ : Dict = value elif weight_type == "weight_v": lowercase__ : List[str] = value elif weight_type == "bias": lowercase__ : Optional[Any] = value else: lowercase__ : List[str] = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Tuple = [] lowercase__ : List[str] = fairseq_model.state_dict() lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) lowercase__ : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue lowercase__ : int = True if "*" in mapped_key: lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' 
)[-2] lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase ) if "weight_g" in name: lowercase__ : List[Any] = 'weight_g' elif "weight_v" in name: lowercase__ : int = 'weight_v' elif "bias" in name: lowercase__ : Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ : Union[str, Any] = 'weight' else: lowercase__ : int = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : int = full_name.split('conv_layers.' )[-1] lowercase__ : int = name.split('.' ) lowercase__ : int = int(items[0] ) lowercase__ : Dict = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase__ : Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase__ : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) lowercase__ : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase__ : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ): '''simple docstring''' if config_path is not None: lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase ) else: lowercase__ : Any = UniSpeechSatConfig() lowercase__ : Union[str, Any] = '' if is_finetuned: lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase ) else: lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) lowercase__ : Union[str, Any] = model[0].eval() recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) if 
__name__ == "__main__": _UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCamelCase : str = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
645
0
"""simple docstring""" from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE): def __init__( self , a , a = None , a = None , a = True , a = None , a = False , a = None , a = True , a = "arrow" , **a , ) -> int: super().__init__( split=A_ , features=A_ , cache_dir=A_ , keep_in_memory=A_ , streaming=A_ , **A_ , ) lowercase__ : str = load_from_cache_file lowercase__ : List[str] = file_format lowercase__ : int = Spark( df=A_ , features=A_ , cache_dir=A_ , working_dir=A_ , **A_ , ) def _UpperCAmelCase ( self ) -> Tuple: if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) lowercase__ : int = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=A_ , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
703
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int: lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : Dict = image_size lowercase__ : str = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : List[str] = embed_dim lowercase__ : Any = depths lowercase__ : Dict = num_heads lowercase__ : List[str] = window_size lowercase__ : int = mlp_ratio lowercase__ : Tuple = qkv_bias lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Tuple = drop_path_rate lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = use_absolute_embeddings lowercase__ : Optional[Any] = patch_norm lowercase__ : Any = layer_norm_eps lowercase__ : List[Any] = initializer_range lowercase__ : List[str] = is_training lowercase__ : int = scope lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = type_sequence_label_size lowercase__ : List[str] = encoder_stride lowercase__ : Optional[Any] = out_features lowercase__ : Dict = out_indices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _UpperCAmelCase ( self , a , a , a ) -> Dict: lowercase__ : Tuple = MaskFormerSwinModel(config=a ) model.to(a ) model.eval() lowercase__ : str = model(a ) lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a ) model.to(a ) model.eval() lowercase__ : int = model(a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(a ): lowercase__ : Dict = ['stem'] lowercase__ : List[str] = MaskFormerSwinBackbone(config=a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase__ : str = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Any = False lowerCamelCase__ : Dict = False lowerCamelCase__ : int = False def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = MaskFormerSwinModelTester(self ) lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def _UpperCAmelCase ( self ) -> Optional[int]: pass def _UpperCAmelCase ( self ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCAmelCase ( self ) -> str: return def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a ) @unittest.skip('Swin does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: pass @unittest.skip('Swin does not support feedforward chunking' ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> str: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) lowercase__ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => 
so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def _UpperCAmelCase ( self ) -> List[Any]: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def _UpperCAmelCase ( self ) -> int: pass def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple: lowercase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[Any] = outputs.hidden_states lowercase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swin has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any] = 3 lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : int = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass def _UpperCAmelCase ( self ) -> Any: lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(a ): lowercase__ : Union[str, Any] = 0 return t def check_equivalence(a , a , a , a={} ): 
with torch.no_grad(): lowercase__ : Optional[Any] = model(**a , return_dict=a , **a ) lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple() def recursive_check(a , a ): if isinstance(a , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a , a ): recursive_check(a , a ) elif isinstance(a , a ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(a , a ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has""" f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}.""" ) , ) recursive_check(a , a ) for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) model.to(a ) model.eval() lowercase__ : Tuple = self._prepare_for_class(a , a ) lowercase__ : Optional[Any] = self._prepare_for_class(a , a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a ) lowercase__ : int = self._prepare_for_class(a , a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ ( unittest.TestCase , _a): lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Optional[int] = MaskFormerSwinModelTester(self ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: lowercase__ : Optional[Any] = backbone_class(a ) backbone.to(a ) backbone.eval() lowercase__ : Union[str, Any] = backbone(**a ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , a ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowercase__ : List[str] = backbone(**a , output_hidden_states=a ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowercase__ : List[Any] = backbone(**a , output_attentions=a ) self.assertIsNotNone(outputs.attentions )
645
0
"""simple docstring""" import operator as op def a_ ( _lowerCAmelCase : List[Any] ): '''simple docstring''' lowercase__ : Optional[Any] = [] lowercase__ : Any = lambda _lowerCAmelCase , _lowerCAmelCase : int(x / y ) # noqa: E731 integer division operation lowercase__ : str = { '^': op.pow, '*': op.mul, '/': div, '+': op.add, '-': op.sub, } # operators & their respective operation # print table header print('Symbol'.center(8 ) , 'Action'.center(12 ) , 'Stack' , sep=' | ' ) print('-' * (30 + len(_lowercase )) ) for x in post_fix: if x.isdigit(): # if x in digit stack.append(_lowercase ) # append x to stack # output in tabular format print(x.rjust(8 ) , ('push(' + x + ')').ljust(12 ) , ','.join(_lowercase ) , sep=' | ' ) else: lowercase__ : Optional[int] = stack.pop() # pop stack # output in tabular format print(''.rjust(8 ) , ('pop(' + b + ')').ljust(12 ) , ','.join(_lowercase ) , sep=' | ' ) lowercase__ : Tuple = stack.pop() # pop stack # output in tabular format print(''.rjust(8 ) , ('pop(' + a + ')').ljust(12 ) , ','.join(_lowercase ) , sep=' | ' ) stack.append( str(opr[x](int(_lowercase ) , int(_lowercase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack # output in tabular format print( x.rjust(8 ) , ('push(' + a + x + b + ')').ljust(12 ) , ','.join(_lowercase ) , sep=' | ' , ) return int(stack[0] ) if __name__ == "__main__": _UpperCamelCase : List[str] = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ") print("\n\tResult = ", solve(Postfix))
704
"""simple docstring""" import math def a_ ( _lowerCAmelCase : int = 100 ): '''simple docstring''' lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) ) lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f'''{solution() = }''')
645
0
"""simple docstring""" from __future__ import annotations def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : list[str] | None = None , _lowerCAmelCase : dict[str, float] | None = None , _lowerCAmelCase : bool = False , ): '''simple docstring''' lowercase__ : Tuple = cipher_alphabet or [chr(_A ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) lowercase__ : Dict = { "a": 0.0_8_4_9_7, "b": 0.0_1_4_9_2, "c": 0.0_2_2_0_2, "d": 0.0_4_2_5_3, "e": 0.1_1_1_6_2, "f": 0.0_2_2_2_8, "g": 0.0_2_0_1_5, "h": 0.0_6_0_9_4, "i": 0.0_7_5_4_6, "j": 0.0_0_1_5_3, "k": 0.0_1_2_9_2, "l": 0.0_4_0_2_5, "m": 0.0_2_4_0_6, "n": 0.0_6_7_4_9, "o": 0.0_7_5_0_7, "p": 0.0_1_9_2_9, "q": 0.0_0_0_9_5, "r": 0.0_7_5_8_7, "s": 0.0_6_3_2_7, "t": 0.0_9_3_5_6, "u": 0.0_2_7_5_8, "v": 0.0_0_9_7_8, "w": 0.0_2_5_6_0, "x": 0.0_0_1_5_0, "y": 0.0_1_9_9_4, "z": 0.0_0_0_7_7, } else: # Custom frequencies dictionary lowercase__ : Optional[int] = frequencies_dict if not case_sensitive: lowercase__ : str = ciphertext.lower() # Chi squared statistic values lowercase__ : dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(_A ) ): lowercase__ : Optional[Any] = "" # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet lowercase__ : Dict = (alphabet_letters.index(letter.lower() ) - shift) % len( _A ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter lowercase__ : str = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: lowercase__ : List[str] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message lowercase__ : List[str] = decrypted_with_shift.lower().count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowercase__ : List[Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowercase__ : str = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message lowercase__ : Any = decrypted_with_shift.count(_A ) # Get the excepcted amount of times the letter should appear based # on letter frequencies lowercase__ : str = frequencies[letter] * occurrences # Complete the chi squared statistic formula lowercase__ : int = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary lowercase__ : Optional[int] = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(_lowerCAmelCase : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] lowercase__ : int = min( _A , key=_A , ) # Get all the data from the most likely cipher (key, decoded message) ( lowercase__ ) : int = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( 
most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
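For reference, a minimal sketch of the textbook chi-squared score that drives the shift selection above: for each candidate shift, sum (observed - expected)^2 / expected over the letters, where expected is the English relative frequency times the message length, and the shift with the lowest score wins. (Note that the implementation above uses the observed count in place of the message length when computing the expected value.)

def chi_squared_score(text, frequencies):
    # frequencies: letter -> relative frequency in English, as in the table above
    score = 0.0
    for letter in set(text):
        if letter in frequencies:
            observed = text.count(letter)
            expected = frequencies[letter] * len(text)
            score += (observed - expected) ** 2 / expected
    return score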
705
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def _UpperCAmelCase ( self ) -> Tuple: lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : List[Any] = controlnet_params lowercase__ : int = 'bird' lowercase__ : List[Any] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples ) lowercase__ : List[Any] = jax.random.PRNGKey(0 ) lowercase__ : Tuple = jax.random.split(a , jax.device_count() ) lowercase__ : str = replicate(a ) lowercase__ : List[str] = shard(a ) lowercase__ : Dict = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : Optional[Any] = jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> List[str]: lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : Optional[Any] = controlnet_params lowercase__ : List[Any] = 'Chef in the kitchen' lowercase__ : List[str] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) lowercase__ : List[str] = jax.random.PRNGKey(0 ) lowercase__ : str = jax.random.split(a , jax.device_count() ) lowercase__ : Optional[Any] = replicate(a ) lowercase__ : Optional[Any] = shard(a ) lowercase__ : List[Any] = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + 
images.shape[-3:] ) lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : str = jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
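A minimal sketch of the replicate/shard pattern these tests use to drive all local devices through `pmap`; the toy parameters and batch are illustrative, not a real diffusion model.

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

params = {"scale": jnp.array(2.0)}
replicated_params = replicate(params)  # adds a leading device axis
inputs = jnp.arange(float(jax.device_count() * 4)).reshape(-1, 4)
sharded_inputs = shard(inputs)  # -> (n_devices, per_device_batch, 4)


@jax.pmap
def scale_fn(p, x):
    return p["scale"] * x


out = scale_fn(replicated_params, sharded_inputs)
print(out.shape)  # (n_devices, 1, 4)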
645
0
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 _UpperCamelCase : Union[str, Any] = data_utils.TransfoXLTokenizer _UpperCamelCase : List[str] = data_utils.TransfoXLCorpus _UpperCamelCase : str = data_utils _UpperCamelCase : Dict = data_utils def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] ): '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowerCamelCase_ , 'rb' ) as fp: lowercase__ : List[str] = pickle.load(lowerCamelCase_ , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowercase__ : str = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" ) lowercase__ : Any = corpus.vocab.__dict__ torch.save(lowerCamelCase_ , lowerCamelCase_ ) lowercase__ : Optional[Any] = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , lowerCamelCase_ ) lowercase__ : Tuple = pytorch_dump_folder_path + '/' + CORPUS_NAME print(f"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(lowerCamelCase_ , lowerCamelCase_ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowercase__ : Optional[int] = os.path.abspath(lowerCamelCase_ ) lowercase__ : Optional[int] = os.path.abspath(lowerCamelCase_ ) print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": lowercase__ : List[Any] = TransfoXLConfig() else: lowercase__ : str = TransfoXLConfig.from_json_file(lowerCamelCase_ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowercase__ : Optional[int] = TransfoXLLMHeadModel(lowerCamelCase_ ) lowercase__ : int = load_tf_weights_in_transfo_xl(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # Save pytorch-model lowercase__ : Union[str, Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) lowercase__ : str = os.path.join(lowerCamelCase_ , lowerCamelCase_ ) print(f"""Save PyTorch model to {os.path.abspath(lowerCamelCase_ )}""" ) torch.save(model.state_dict() , lowerCamelCase_ ) print(f"""Save configuration file to {os.path.abspath(lowerCamelCase_ )}""" ) with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. 
\n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) _UpperCamelCase : List[str] = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
706
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
645
0
"""simple docstring""" from __future__ import annotations def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ): '''simple docstring''' if days_between_payments <= 0: raise ValueError('days_between_payments must be > 0' ) if daily_interest_rate < 0: raise ValueError('daily_interest_rate must be >= 0' ) if principal <= 0: raise ValueError('principal must be > 0' ) return principal * daily_interest_rate * days_between_payments def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ): '''simple docstring''' if number_of_compounding_periods <= 0: raise ValueError('number_of_compounding_periods must be > 0' ) if nominal_annual_interest_rate_percentage < 0: raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' ) if principal <= 0: raise ValueError('principal must be > 0' ) return principal * ( (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods - 1 ) def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ): '''simple docstring''' if number_of_years <= 0: raise ValueError('number_of_years must be > 0' ) if nominal_annual_percentage_rate < 0: raise ValueError('nominal_annual_percentage_rate must be >= 0' ) if principal <= 0: raise ValueError('principal must be > 0' ) return compound_interest( _lowerCAmelCase , nominal_annual_percentage_rate / 365 , number_of_years * 365 ) if __name__ == "__main__": import doctest doctest.testmod()
707
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) model.to(a ) from datasets import load_dataset lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' ) lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' ) lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : List[str] = model(**a ) lowercase__ : List[Any] = outputs.logits lowercase__ : Union[str, Any] = torch.Size((1, 1_6) ) self.assertEqual(logits.shape , a ) lowercase__ : Tuple = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
645
0
"""simple docstring""" from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class UpperCAmelCase_ ( __A): def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[int] = SMALL_MODEL_IDENTIFIER lowercase__ : int = '''pt''' lowercase__ : Tuple = '''tf''' def _UpperCAmelCase ( self , a ) -> int: lowercase__ : List[Any] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(a ) def _UpperCAmelCase ( self , a ) -> int: lowercase__ : Tuple = TFAutoModel.from_pretrained(self.test_model , from_pt=a ) model_tf.save_pretrained(a ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Optional[Any] = '''mock_framework''' # Framework provided - return whatever the user provides lowercase__ : Optional[int] = FeaturesManager.determine_framework(self.test_model , a ) self.assertEqual(a , a ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(a ) lowercase__ : Union[str, Any] = FeaturesManager.determine_framework(a , a ) self.assertEqual(a , a ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(a ) lowercase__ : Optional[Any] = FeaturesManager.determine_framework(a , a ) self.assertEqual(a , a ) def _UpperCAmelCase ( self ) -> List[str]: with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(a ) lowercase__ : List[str] = FeaturesManager.determine_framework(a ) self.assertEqual(a , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(a ) lowercase__ : Optional[Any] = FeaturesManager.determine_framework(a ) self.assertEqual(a , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(a ): lowercase__ : Union[str, Any] = FeaturesManager.determine_framework(a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[str] = MagicMock(return_value=a ) with patch('transformers.onnx.features.is_tf_available' , a ): lowercase__ : List[Any] = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(a , self.framework_pt ) # PyTorch not in environment -> use TensorFlow lowercase__ : Optional[int] = MagicMock(return_value=a ) with patch('transformers.onnx.features.is_torch_available' , a ): lowercase__ : Tuple = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(a , self.framework_tf ) # Both in environment -> use PyTorch lowercase__ : List[str] = MagicMock(return_value=a ) lowercase__ : List[str] = MagicMock(return_value=a ) with patch('transformers.onnx.features.is_tf_available' , a ), patch( 'transformers.onnx.features.is_torch_available' , a ): lowercase__ : Tuple = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(a , self.framework_pt ) # Both not in environment -> raise error lowercase__ : str = MagicMock(return_value=a ) lowercase__ : str = MagicMock(return_value=a ) with patch('transformers.onnx.features.is_tf_available' , a ), patch( 'transformers.onnx.features.is_torch_available' , a ): with self.assertRaises(a ): lowercase__ : List[str] = FeaturesManager.determine_framework(self.test_model )
708
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCAmelCase_ : @staticmethod def _UpperCAmelCase ( *a , **a ) -> int: pass def a_ ( _lowerCAmelCase : Image ): '''simple docstring''' lowercase__ : List[str] = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCAmelCase_ ( unittest.TestCase): lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def _UpperCAmelCase ( self , a , a , a ) -> Dict: lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , a , a ) -> Optional[int]: lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' ) self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a ) import datasets lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) lowercase__ : List[Any] = depth_estimator( [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] ) self.assertEqual( [ {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, ] , a , ) @require_tf @unittest.skip('Depth estimation is not implemented in TF' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Tuple = 'Intel/dpt-large' lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a ) lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' ) lowercase__ : Optional[Any] = hashimage(outputs['depth'] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 ) @require_torch def _UpperCAmelCase ( self ) -> Optional[int]: # This is highly irregular to have no small tests. self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
645
0
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCAmelCase_ : @staticmethod def _UpperCAmelCase ( *a , **a ) -> Tuple: pass @is_pipeline_test @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): lowerCamelCase__ : Any = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def _UpperCAmelCase ( self , a , a , a ) -> Optional[Any]: lowercase__ : Optional[Any] = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' ) lowercase__ : Optional[int] = [ { 'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'question': 'How many cats are there?', }, { 'image': './tests/fixtures/tests_samples/COCO/000000039769.png', 'question': 'How many cats are there?', }, ] return vqa_pipeline, examples def _UpperCAmelCase ( self , a , a ) -> int: lowercase__ : Optional[Any] = vqa_pipeline(_lowerCAmelCase , top_k=1 ) self.assertEqual( _lowerCAmelCase , [ [{'score': ANY(_lowerCAmelCase ), 'answer': ANY(_lowerCAmelCase )}], [{'score': ANY(_lowerCAmelCase ), 'answer': ANY(_lowerCAmelCase )}], ] , ) @require_torch def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[str] = pipeline('visual-question-answering' , model='hf-internal-testing/tiny-vilt-random-vqa' ) lowercase__ : List[str] = './tests/fixtures/tests_samples/COCO/000000039769.png' lowercase__ : str = 'How many cats are there?' lowercase__ : int = vqa_pipeline(image=_lowerCAmelCase , question='How many cats are there?' , top_k=2 ) self.assertEqual( _lowerCAmelCase , [{'score': ANY(_lowerCAmelCase ), 'answer': ANY(_lowerCAmelCase )}, {'score': ANY(_lowerCAmelCase ), 'answer': ANY(_lowerCAmelCase )}] ) lowercase__ : str = vqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( _lowerCAmelCase , [{'score': ANY(_lowerCAmelCase ), 'answer': ANY(_lowerCAmelCase )}, {'score': ANY(_lowerCAmelCase ), 'answer': ANY(_lowerCAmelCase )}] ) @slow @require_torch def _UpperCAmelCase ( self ) -> int: lowercase__ : int = pipeline('visual-question-answering' , model='dandelin/vilt-b32-finetuned-vqa' ) lowercase__ : Union[str, Any] = './tests/fixtures/tests_samples/COCO/000000039769.png' lowercase__ : Any = 'How many cats are there?' lowercase__ : Optional[int] = vqa_pipeline(image=_lowerCAmelCase , question=_lowerCAmelCase , top_k=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase , decimals=4 ) , [{'score': 0.8_799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] ) lowercase__ : List[Any] = vqa_pipeline({'image': image, 'question': question} , top_k=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase , decimals=4 ) , [{'score': 0.8_799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}] ) lowercase__ : List[str] = vqa_pipeline( [{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 ) self.assertEqual( nested_simplify(_lowerCAmelCase , decimals=4 ) , [[{'score': 0.8_799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2 , ) @require_tf @unittest.skip('Visual question answering not implemented in TF' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass
709
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class UpperCAmelCase_ ( _a): def __init__( self ) -> Any: lowercase__ : Tuple = [] def _UpperCAmelCase ( self , a , a , a , **a ) -> Any: self.events.append('on_init_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]: self.events.append('on_train_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]: self.events.append('on_train_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_epoch_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]: self.events.append('on_epoch_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_step_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> str: self.events.append('on_step_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_evaluate' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple: self.events.append('on_predict' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]: self.events.append('on_save' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]: self.events.append('on_log' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Any: self.events.append('on_prediction_step' ) @require_torch class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> str: lowercase__ : str = tempfile.mkdtemp() def _UpperCAmelCase ( self ) -> Dict: shutil.rmtree(self.output_dir ) def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
lowercase__ : str = RegressionDataset(length=a ) lowercase__ : Any = RegressionDataset(length=a ) lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a ) lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a ) lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a ) return Trainer( a , a , train_dataset=a , eval_dataset=a , callbacks=a , ) def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]: self.assertEqual(len(a ) , len(a ) ) # Order doesn't matter lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ ) lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ ) for cba, cba in zip(a , a ): if isinstance(a , a ) and isinstance(a , a ): self.assertEqual(a , a ) elif isinstance(a , a ) and not isinstance(a , a ): self.assertEqual(a , cba.__class__ ) elif not isinstance(a , a ) and isinstance(a , a ): self.assertEqual(cba.__class__ , a ) else: self.assertEqual(a , a ) def _UpperCAmelCase ( self , a ) -> Optional[Any]: lowercase__ : Dict = ['on_init_end', 'on_train_begin'] lowercase__ : List[Any] = 0 lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() ) lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs ): expected_events.append('on_epoch_begin' ) for _ in range(a ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save' ) expected_events.append('on_epoch_end' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : int = self.get_trainer() lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # Callbacks passed at init are added to the default callbacks lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a ) lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase__ : List[str] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(a ) expected_callbacks.remove(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) lowercase__ : Optional[Any] = self.get_trainer() lowercase__ : List[Any] = trainer.pop_callback(a ) self.assertEqual(cb.__class__ , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) trainer.add_callback(a ) expected_callbacks.insert(0 , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # We can also add, pop, or remove by instance lowercase__ : int = self.get_trainer() 
lowercase__ : List[str] = trainer.callback_handler.callbacks[0] trainer.remove_callback(a ) expected_callbacks.remove(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) lowercase__ : Tuple = self.get_trainer() lowercase__ : Dict = trainer.callback_handler.callbacks[0] lowercase__ : Union[str, Any] = trainer.pop_callback(a ) self.assertEqual(a , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) trainer.add_callback(a ) expected_callbacks.insert(0 , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) def _UpperCAmelCase ( self ) -> Tuple: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' , category=a ) lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() lowercase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # Independent log/save/eval lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() lowercase__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' ) trainer.train() lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' ) trainer.train() lowercase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # A bit of everything lowercase__ : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , ) trainer.train() lowercase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning' ) as warn_mock: lowercase__ : str = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(a ) in warn_mock.call_args[0][0]
645
0
"""simple docstring""" import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def a_ ( *_lowerCAmelCase : Dict , _lowerCAmelCase : Tuple = None , _lowerCAmelCase : str=True , _lowerCAmelCase : Dict=2 ): '''simple docstring''' from .. import __version__ lowercase__ : Optional[Any] = take_from lowercase__ : Any = () if not isinstance(args[0] , lowerCamelCase__ ): lowercase__ : Optional[int] = (args,) for attribute, version_name, message in args: if version.parse(version.parse(lowerCamelCase__ ).base_version ) >= version.parse(lowerCamelCase__ ): raise ValueError( f"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'""" f""" version {__version__} is >= {version_name}""" ) lowercase__ : List[Any] = None if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(lowerCamelCase__ ),) lowercase__ : List[Any] = f"""The `{attribute}` argument is deprecated and will be removed in version {version_name}.""" elif hasattr(lowerCamelCase__ , lowerCamelCase__ ): values += (getattr(lowerCamelCase__ , lowerCamelCase__ ),) lowercase__ : Any = f"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}.""" elif deprecated_kwargs is None: lowercase__ : Optional[int] = f"""`{attribute}` is deprecated and will be removed in version {version_name}.""" if warning is not None: lowercase__ : Optional[int] = warning + " " if standard_warn else "" warnings.warn(warning + message , lowerCamelCase__ , stacklevel=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0: lowercase__ : str = inspect.getouterframes(inspect.currentframe() )[1] lowercase__ : Union[str, Any] = call_frame.filename lowercase__ : List[str] = call_frame.lineno lowercase__ : Dict = call_frame.function lowercase__ : Tuple = next(iter(deprecated_kwargs.items() ) ) raise TypeError(f"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" ) if len(lowerCamelCase__ ) == 0: return elif len(lowerCamelCase__ ) == 1: return values[0] return values
710
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _UpperCamelCase : str = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Tuple = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Dict = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys _UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
645
0
"""simple docstring""" import math _UpperCamelCase : Any = 10 _UpperCamelCase : List[str] = 7 _UpperCamelCase : int = BALLS_PER_COLOUR * NUM_COLOURS def a_ ( _lowerCAmelCase : int = 20 ): '''simple docstring''' lowercase__ : Optional[int] = math.comb(_lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Union[str, Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _lowerCAmelCase ) lowercase__ : Any = NUM_COLOURS * (1 - missing_colour / total) return f"""{result:.9f}""" if __name__ == "__main__": print(solution(20))
711
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self , a ) -> str: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): lowercase__ : str = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = 'sshleifer/tiny-gpt2' lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , ) lowercase__ : str = TensorFlowBenchmark(a ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> int: lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Any = 'sshleifer/tiny-gpt2' lowercase__ : List[Any] = AutoConfig.from_pretrained(a ) lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , ) lowercase__ : Tuple = TensorFlowBenchmark(a , [config] ) lowercase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : List[str] = AutoConfig.from_pretrained(a ) lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : List[str] = TensorFlowBenchmark(a , [config] ) lowercase__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : Tuple = benchmark.run() 
self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : Optional[int] = AutoConfig.from_pretrained(a ) lowercase__ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : str = TensorFlowBenchmark(a , [config] ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random' lowercase__ : Any = AutoConfig.from_pretrained(a ) lowercase__ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : int = TensorFlowBenchmark(a , configs=[config] ) lowercase__ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : Any = 'sshleifer/tiny-gpt2' lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , ) lowercase__ : Any = TensorFlowBenchmark(a ) lowercase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Any = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , ) lowercase__ : Union[str, Any] = TensorFlowBenchmark(a ) benchmark.run() self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Tuple = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(a ): self.assertTrue(hasattr(a , 'sequential' ) ) self.assertTrue(hasattr(a , 'cumulative' ) ) self.assertTrue(hasattr(a , 'current' ) ) self.assertTrue(hasattr(a , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , ) lowercase__ : Optional[int] = TensorFlowBenchmark(a ) lowercase__ : Optional[Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
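# Hedged standalone sketch (not part of the test class) of the pattern these
# tests exercise -- benchmark a tiny model for inference only:
#
#     args = TensorFlowBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = TensorFlowBenchmark(args).run()
#     print(results.time_inference_result, results.memory_inference_result)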
645
0
"""simple docstring""" import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=sys.stdout, ) _UpperCamelCase : Dict = logging.getLogger(__name__) _UpperCamelCase : int = {"facebook/bart-base": BartForConditionalGeneration} _UpperCamelCase : List[str] = {"facebook/bart-base": BartTokenizer} def a_ ( ): '''simple docstring''' lowercase__ : Optional[int] = argparse.ArgumentParser(description='Export Bart model + Beam Search to ONNX graph.' ) parser.add_argument( '--validation_file' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='A csv or a json file containing the validation data.' ) parser.add_argument( '--max_length' , type=__lowerCAmelCase , default=5 , help='The maximum total input sequence length after tokenization.' , ) parser.add_argument( '--num_beams' , type=__lowerCAmelCase , default=__lowerCAmelCase , help=( 'Number of beams to use for evaluation. This argument will be ' 'passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.' ) , ) parser.add_argument( '--model_name_or_path' , type=__lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=__lowerCAmelCase , ) parser.add_argument( '--config_name' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='Pretrained config name or path if not the same as model_name' , ) parser.add_argument( '--device' , type=__lowerCAmelCase , default='cpu' , help='Device where the model will be run' , ) parser.add_argument('--output_file_path' , type=__lowerCAmelCase , default=__lowerCAmelCase , help='Where to store the final ONNX file.' 
) lowercase__ : Optional[Any] = parser.parse_args() return args def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any]="cpu" ): '''simple docstring''' lowercase__ : List[Any] = model_dict[model_name].from_pretrained(__lowerCAmelCase ).to(__lowerCAmelCase ) lowercase__ : Optional[Any] = tokenizer_dict[model_name].from_pretrained(__lowerCAmelCase ) if model_name in ["facebook/bart-base"]: lowercase__ : Optional[Any] = 0 lowercase__ : Optional[Any] = None lowercase__ : Optional[int] = 0 return huggingface_model, tokenizer def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ): '''simple docstring''' model.eval() lowercase__ : Dict = None lowercase__ : str = torch.jit.script(BARTBeamSearchGenerator(__lowerCAmelCase ) ) with torch.no_grad(): lowercase__ : Optional[int] = """My friends are cool but they eat too many carbs.""" lowercase__ : List[str] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='pt' ).to(model.device ) lowercase__ : int = model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , num_beams=__lowerCAmelCase , max_length=__lowerCAmelCase , early_stopping=__lowerCAmelCase , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( __lowerCAmelCase , ( inputs['input_ids'], inputs['attention_mask'], num_beams, max_length, model.config.decoder_start_token_id, ) , __lowerCAmelCase , opset_version=14 , input_names=['input_ids', 'attention_mask', 'num_beams', 'max_length', 'decoder_start_token_id'] , output_names=['output_ids'] , dynamic_axes={ 'input_ids': {0: 'batch', 1: 'seq'}, 'output_ids': {0: 'batch', 1: 'seq_out'}, } , example_outputs=__lowerCAmelCase , ) logger.info('Model exported to {}'.format(__lowerCAmelCase ) ) lowercase__ : Optional[int] = remove_dup_initializers(os.path.abspath(__lowerCAmelCase ) ) logger.info('Deduplicated and optimized model written to {}'.format(__lowerCAmelCase ) ) lowercase__ : Dict = onnxruntime.InferenceSession(__lowerCAmelCase ) lowercase__ : str = ort_sess.run( __lowerCAmelCase , { 'input_ids': inputs['input_ids'].cpu().numpy(), 'attention_mask': inputs['attention_mask'].cpu().numpy(), 'num_beams': np.array(__lowerCAmelCase ), 'max_length': np.array(__lowerCAmelCase ), 'decoder_start_token_id': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('Model outputs from torch and ONNX Runtime are similar.' ) logger.info('Success.' ) def a_ ( ): '''simple docstring''' lowercase__ : Union[str, Any] = parse_args() lowercase__ : List[Any] = 5 lowercase__ : Dict = 4 # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() lowercase__ : Tuple = torch.device(args.device ) lowercase__ : List[str] = load_model_tokenizer(args.model_name_or_path , __lowerCAmelCase ) if model.config.decoder_start_token_id is None: raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined' ) model.to(__lowerCAmelCase ) if args.max_length: lowercase__ : Dict = args.max_length if args.num_beams: lowercase__ : Union[str, Any] = args.num_beams if args.output_file_path: lowercase__ : List[Any] = args.output_file_path else: lowercase__ : Optional[Any] = """BART.onnx""" logger.info('Exporting model to ONNX' ) export_and_validate_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if __name__ == "__main__": main()
712
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class UpperCAmelCase_ ( _a): def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any: lowercase__ : Tuple = parent lowercase__ : List[Any] = batch_size lowercase__ : List[Any] = seq_length lowercase__ : List[Any] = is_training lowercase__ : Optional[Any] = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : int = use_labels lowercase__ : Tuple = vocab_size lowercase__ : int = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : Optional[Any] = intermediate_size lowercase__ : Optional[Any] = hidden_act lowercase__ : List[str] = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : List[Any] = max_position_embeddings lowercase__ : List[str] = type_vocab_size lowercase__ : Tuple = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = num_labels lowercase__ : Tuple = num_choices lowercase__ : str = scope def _UpperCAmelCase ( self ) -> Any: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_input_mask: lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Dict = None lowercase__ : Optional[Any] = None lowercase__ : int = None if self.use_labels: lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> Optional[int]: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict: lowercase__ : Tuple = DistilBertModel(config=a ) model.to(a ) model.eval() lowercase__ : Any = model(a , a ) lowercase__ : str = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict: lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a ) model.to(a ) model.eval() lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int: lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model( a , attention_mask=a , start_positions=a , end_positions=a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]: lowercase__ : int = self.num_labels lowercase__ : Dict = DistilBertForSequenceClassification(a ) model.to(a ) model.eval() lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any: lowercase__ : Any = self.num_labels lowercase__ : List[str] = DistilBertForTokenClassification(config=a ) model.to(a ) model.eval() lowercase__ : Any = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple: lowercase__ : List[Any] = self.num_choices lowercase__ : Any = DistilBertForMultipleChoice(config=a ) model.to(a ) model.eval() lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int = model( a , attention_mask=a , labels=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : List[str] = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ : str = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Any = True lowerCamelCase__ : List[Any] = True lowerCamelCase__ : Optional[Any] = True def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : str = DistilBertModelTester(self ) lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 ) def _UpperCAmelCase ( self ) -> Dict: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : 
str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = DistilBertModel.from_pretrained(a ) self.assertIsNotNone(a ) @slow @require_torch_gpu def _UpperCAmelCase ( self ) -> Any: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return lowercase__ : Optional[int] = True lowercase__ : Union[str, Any] = model_class(config=a ) lowercase__ : int = self._prepare_for_class(a , a ) lowercase__ : Tuple = torch.jit.trace( a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) ) lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a ) loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) ) @require_torch class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' ) lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase__ : Optional[Any] = model(a , attention_mask=a )[0] lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , a ) lowercase__ : List[Any] = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
645
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _UpperCamelCase : Union[str, Any] = { "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Optional[int] = [ "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST", "NezhaForNextSentencePrediction", "NezhaForMaskedLM", "NezhaForPreTraining", "NezhaForMultipleChoice", "NezhaForQuestionAnswering", "NezhaForSequenceClassification", "NezhaForTokenClassification", "NezhaModel", "NezhaPreTrainedModel", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys _UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
713
"""simple docstring""" from __future__ import annotations def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ): '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif stress < 0: raise ValueError('Stress cannot be negative' ) elif tangential_force < 0: raise ValueError('Tangential Force cannot be negative' ) elif area < 0: raise ValueError('Area cannot be negative' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
645
0
"""simple docstring""" import random def a_ ( _lowerCAmelCase : Any ): '''simple docstring''' lowercase__ : Dict = num - 1 lowercase__ : Optional[Any] = 0 while s % 2 == 0: lowercase__ : List[Any] = s // 2 t += 1 for _ in range(5 ): lowercase__ : List[str] = random.randrange(2 , num - 1 ) lowercase__ : Union[str, Any] = pow(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) if v != 1: lowercase__ : Tuple = 0 while v != (num - 1): if i == t - 1: return False else: lowercase__ : str = i + 1 lowercase__ : Any = (v**2) % num return True def a_ ( _lowerCAmelCase : int ): '''simple docstring''' if num < 2: return False lowercase__ : List[str] = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(lowerCAmelCase__ ) def a_ ( _lowerCAmelCase : Union[str, Any] = 1024 ): '''simple docstring''' while True: lowercase__ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) ) if is_prime_low_num(lowerCAmelCase__ ): return num if __name__ == "__main__": _UpperCamelCase : int = generate_large_prime() print(("Prime number:", num)) print(("is_prime_low_num:", is_prime_low_num(num)))
714
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any: lowercase__ : List[str] = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[int] = image_size lowercase__ : List[Any] = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : str = is_training lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[Any] = hidden_size lowercase__ : Dict = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[Any] = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Any = type_sequence_label_size lowercase__ : Dict = initializer_range lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Tuple = n_targets lowercase__ : Optional[int] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens def _UpperCAmelCase ( self ) -> Any: lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowercase__ : Tuple = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowercase__ : int = [] for i in range(self.batch_size ): lowercase__ : Optional[Any] = {} lowercase__ : Any = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=a ) lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a ) labels.append(a ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> List[Any]: return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def _UpperCAmelCase ( self , a , a , a ) -> int: lowercase__ : List[str] = YolosModel(config=a ) model.to(a ) model.eval() lowercase__ : List[Any] = model(a ) self.parent.assertEqual( result.last_hidden_state.shape , 
(self.batch_size, self.expected_seq_len, self.hidden_size) ) def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]: lowercase__ : str = YolosForObjectDetection(a ) model.to(a ) model.eval() lowercase__ : Dict = model(pixel_values=a ) lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowercase__ : str = model(pixel_values=a , labels=a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs lowercase__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCamelCase__ : List[str] = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) lowerCamelCase__ : List[Any] = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Tuple = False lowerCamelCase__ : Union[str, Any] = False def _UpperCAmelCase ( self , a , a , a=False ) -> Dict: lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowercase__ : Optional[Any] = [] for i in range(self.model_tester.batch_size ): lowercase__ : Dict = {} lowercase__ : Dict = torch.ones( size=(self.model_tester.n_targets,) , device=a , dtype=torch.long ) lowercase__ : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=a , dtype=torch.float ) labels.append(a ) lowercase__ : Union[str, Any] = labels return inputs_dict def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Dict = YolosModelTester(self ) lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: # YOLOS does not use inputs_embeds pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) lowercase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Tuple = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( 
self ) -> Dict: lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Dict = True # in YOLOS, the seq_len is different lowercase__ : Tuple = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : str = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : Optional[int] = True lowercase__ : List[Any] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[str] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : Dict = len(a ) # Check attention is always last and order is fine lowercase__ : Any = True lowercase__ : int = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = 1 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowercase__ : Tuple = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> List[str]: def check_hidden_states_output(a , a , a ): lowercase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(a , a ) ) lowercase__ : int = outputs.hidden_states lowercase__ : Any = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(a ) , a ) # YOLOS has a different seq_length lowercase__ : Optional[int] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[Any] = True check_hidden_states_output(a , a , a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*a ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : int = YolosModel.from_pretrained(a ) self.assertIsNotNone(a ) def a_ ( ): '''simple docstring''' lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Union[str, Any]: return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = 
YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a ) lowercase__ : Tuple = self.default_image_processor lowercase__ : Optional[int] = prepare_img() lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : int = model(inputs.pixel_values ) # verify outputs lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : Any = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , ) lowercase__ : List[str] = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) ) # verify postprocessing lowercase__ : Optional[Any] = image_processor.post_process_object_detection( a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a ) lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7] lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a ) self.assertEqual(len(results['scores'] ) , 5 ) self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) ) self.assertSequenceEqual(results['labels'].tolist() , a ) self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
645
0
"""simple docstring""" from math import factorial def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ): '''simple docstring''' if n < k or k < 0: raise ValueError('Please enter positive integers for n and k where n >= k' ) return factorial(__A ) // (factorial(__A ) * factorial(n - k )) if __name__ == "__main__": print( "The number of five-card hands possible from a standard", f'''fifty-two card deck is: {combinations(52, 5)}\n''', ) print( "If a class of 40 students must be arranged into groups of", f'''4 for group projects, there are {combinations(40, 4)} ways''', "to arrange them.\n", ) print( "If 10 teams are competing in a Formula One race, there", f'''are {combinations(10, 3)} ways that first, second and''', "third place can be awarded.", )
715
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch _UpperCamelCase : int = logging.get_logger(__name__) @dataclass class UpperCAmelCase_ : def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple: lowercase__ : str = load_in_abit lowercase__ : str = load_in_abit lowercase__ : List[str] = llm_inta_threshold lowercase__ : Dict = llm_inta_skip_modules lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload lowercase__ : Any = llm_inta_has_fpaa_weight lowercase__ : Any = bnb_abit_quant_type lowercase__ : Dict = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: lowercase__ : Dict = torch.floataa elif isinstance(a , a ): lowercase__ : Any = getattr(a , a ) elif isinstance(a , torch.dtype ): lowercase__ : Any = bnb_abit_compute_dtype else: raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' ) self.post_init() def _UpperCAmelCase ( self ) -> str: if not isinstance(self.llm_inta_threshold , a ): raise ValueError('llm_int8_threshold must be a float' ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ): raise ValueError('llm_int8_skip_modules must be a list of strings' ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ): raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' ) if not isinstance(self.llm_inta_has_fpaa_weight , a ): raise ValueError('llm_int8_has_fp16_weight must be a boolean' ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' ) if not isinstance(self.bnb_abit_quant_type , a ): raise ValueError('bnb_4bit_quant_type must be a string' ) if not isinstance(self.bnb_abit_use_double_quant , a ): raise ValueError('bnb_4bit_use_double_quant must be a boolean' ) if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse( '0.39.0' ): raise ValueError( '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' ) def _UpperCAmelCase ( self ) -> Tuple: return self.load_in_abit or self.load_in_abit def _UpperCAmelCase ( self ) -> List[str]: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]: lowercase__ : List[Any] = cls(**a ) lowercase__ : Union[str, Any] = [] for key, value in kwargs.items(): if hasattr(a , a ): setattr(a , a , a ) to_remove.append(a ) for key in to_remove: kwargs.pop(a , a ) if 
return_unused_kwargs: return config, kwargs else: return config def _UpperCAmelCase ( self , a ) -> Dict: with open(a , 'w' , encoding='utf-8' ) as writer: lowercase__ : Any = self.to_dict() lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n' writer.write(a ) def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1] return output def __repr__( self ) -> Dict: return f"""{self.__class__.__name__} {self.to_json_string()}""" def _UpperCAmelCase ( self , a = True ) -> str: if use_diff is True: lowercase__ : List[Any] = self.to_diff_dict() else: lowercase__ : List[str] = self.to_dict() return json.dumps(a , indent=2 , sort_keys=a ) + "\n" def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Tuple = self.to_dict() # get the default config dict lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict() lowercase__ : int = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: lowercase__ : Optional[int] = value return serializable_config_dict
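# Hedged usage note (names per the upstream transformers API, which the
# obfuscated `load_in_abit` / `bnb_abit_*` attributes above stand in for):
#
#     quant_config = BitsAndBytesConfig(
#         load_in_4bit=True,
#         bnb_4bit_quant_type="nf4",
#         bnb_4bit_compute_dtype=torch.bfloat16,
#     )
#
# and the object is then passed to `from_pretrained(..., quantization_config=quant_config)`.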
645
0
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _UpperCamelCase : Tuple = logging.get_logger(__name__) _UpperCamelCase : Optional[int] = {"vocab_file": "spiece.model"} _UpperCamelCase : int = { "vocab_file": { "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model", "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model", } } _UpperCamelCase : List[Any] = { "xlnet-base-cased": None, "xlnet-large-cased": None, } # Segments (not really needed) _UpperCamelCase : Optional[Any] = 0 _UpperCamelCase : Tuple = 1 _UpperCamelCase : Union[str, Any] = 2 _UpperCamelCase : List[Any] = 3 _UpperCamelCase : Tuple = 4 class UpperCAmelCase_ ( a__): lowerCamelCase__ : Optional[Any] = VOCAB_FILES_NAMES lowerCamelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ : int = "left" def __init__( self , a , a=False , a=True , a=False , a="<s>" , a="</s>" , a="<unk>" , a="<sep>" , a="<pad>" , a="<cls>" , a="<mask>" , a=["<eop>", "<eod>"] , a = None , **a , ) -> Any: lowercase__ : Optional[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token lowercase__ : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) lowercase__ : int = 3 lowercase__ : Union[str, Any] = do_lower_case lowercase__ : Dict = remove_space lowercase__ : int = keep_accents lowercase__ : List[str] = vocab_file lowercase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_A ) @property def _UpperCAmelCase ( self ) -> Any: return len(self.sp_model ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[Any] = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> List[str]: lowercase__ : List[Any] = self.__dict__.copy() lowercase__ : List[str] = None return state def __setstate__( self , a ) -> str: lowercase__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowercase__ : Union[str, Any] = {} lowercase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self , a ) -> Optional[int]: if self.remove_space: lowercase__ : str = ' '.join(inputs.strip().split() ) else: lowercase__ : Dict = inputs lowercase__ : List[str] = outputs.replace('``' , '"' ).replace('\'\'' , '"' ) if not self.keep_accents: lowercase__ : Optional[Any] = unicodedata.normalize('NFKD' , _A ) lowercase__ : Dict = ''.join([c for c in outputs if not unicodedata.combining(_A )] ) if self.do_lower_case: lowercase__ : Tuple = outputs.lower() return outputs def _UpperCAmelCase ( self , a ) -> Optional[int]: lowercase__ : Union[str, Any] = self.preprocess_text(_A ) lowercase__ : int = self.sp_model.encode(_A , out_type=_A ) lowercase__ : int = [] for piece in pieces: if len(_A ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit(): lowercase__ : Union[str, 
Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(_A , '' ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: lowercase__ : int = cur_pieces[1:] else: lowercase__ : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(_A ) else: new_pieces.append(_A ) return new_pieces def _UpperCAmelCase ( self , a ) -> str: return self.sp_model.PieceToId(_A ) def _UpperCAmelCase ( self , a ) -> str: return self.sp_model.IdToPiece(_A ) def _UpperCAmelCase ( self , a ) -> Dict: lowercase__ : Optional[Any] = ''.join(_A ).replace(_A , ' ' ).strip() return out_string def _UpperCAmelCase ( self , a , a = False , a = None , a = True , **a , ) -> List[str]: lowercase__ : Dict = kwargs.pop('use_source_tokenizer' , _A ) lowercase__ : Dict = self.convert_ids_to_tokens(_A , skip_special_tokens=_A ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. https://github.com/huggingface/transformers/issues/1133 lowercase__ : Optional[Any] = [] lowercase__ : int = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A ) ) lowercase__ : Tuple = [] sub_texts.append(_A ) else: current_sub_text.append(_A ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_A ) ) # Mimic the behavior of the Rust tokenizer: # By default, there are no spaces between special tokens lowercase__ : List[Any] = ''.join(_A ) lowercase__ : Tuple = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: lowercase__ : int = self.clean_up_tokenization(_A ) return clean_text else: return text def _UpperCAmelCase ( self , a , a = None ) -> str: lowercase__ : str = [self.sep_token_id] lowercase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _UpperCAmelCase ( self , a , a = None , a = False ) -> Tuple: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A ) if token_ids_a is not None: return ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1, 1] return ([0] * len(_A )) + [1, 1] def _UpperCAmelCase ( self , a , a = None ) -> Optional[Any]: lowercase__ : str = [self.sep_token_id] lowercase__ : Any = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _UpperCAmelCase ( self , a , a = None ) -> List[str]: if not os.path.isdir(_A ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : str = os.path.join( _A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _A ) elif not os.path.isfile(self.vocab_file ): with open(_A , 'wb' ) as fi: lowercase__ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(_A ) return (out_vocab_file,)
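# Hedged usage sketch: upstream this class is the XLNet tokenizer, loaded from
# one of the checkpoints named in PRETRAINED_VOCAB_FILES_MAP above; note the
# padding side is "left".
#
#     tok = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     ids = tok("Hello world")["input_ids"]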
716
"""simple docstring""" import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _UpperCamelCase : int = 16 _UpperCamelCase : Union[str, Any] = 32 def a_ ( _lowerCAmelCase : Tuple ): '''simple docstring''' return int(x / 2**20 ) class UpperCAmelCase_ : def __enter__( self ) -> Union[str, Any]: gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowercase__ : List[str] = torch.cuda.memory_allocated() return self def __exit__( self , *a ) -> Any: gc.collect() torch.cuda.empty_cache() lowercase__ : Optional[Any] = torch.cuda.memory_allocated() lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated() lowercase__ : List[Any] = bamb(self.end - self.begin ) lowercase__ : List[Any] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ): '''simple docstring''' lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) lowercase__ : Union[str, Any] = load_dataset( 'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} ) def tokenize_function(_lowerCAmelCase : int ): # max_length=None => use the model max length (it's actually the default) lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ : Union[str, Any] = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowerCAmelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
lowercase__ : Dict = DataLoader( tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) lowercase__ : Dict = DataLoader( tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Optional[int] = config['lr'] lowercase__ : Optional[Any] = int(config['num_epochs'] ) lowercase__ : Optional[Any] = int(config['seed'] ) lowercase__ : int = int(config['batch_size'] ) lowercase__ : Union[str, Any] = args.model_name_or_path set_seed(_lowerCAmelCase ) lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase ) # Instantiate optimizer lowercase__ : List[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase ) if accelerator.state.deepspeed_plugin is not None: lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: lowercase__ : List[Any] = 1 lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , ) else: lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # We need to keep track of how many total steps we have iterated over lowercase__ : Optional[int] = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ : Tuple = 0 # Now we train the model lowercase__ : Optional[Any] = {} for epoch in range(_lowerCAmelCase , _lowerCAmelCase ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_lowerCAmelCase ): lowercase__ : List[Any] = model(**_lowerCAmelCase ) lowercase__ : Dict = outputs.loss lowercase__ : int = loss / gradient_accumulation_steps accelerator.backward(_lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def a_ ( ): '''simple docstring''' lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , ) parser.add_argument( '--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , ) lowercase__ : Any = parser.parse_args() lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
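# Hedged usage sketch of the measurement helper defined above: wrap any chunk
# of GPU work in the context manager and read the MB deltas afterwards.
#
#     with TorchTracemalloc() as tracemalloc:
#         run_training_step()                      # hypothetical workload
#     print(tracemalloc.used, tracemalloc.peaked)  # deltas computed in __exit__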
645
0
"""simple docstring""" from __future__ import annotations from cmath import sqrt def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any ): '''simple docstring''' if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) lowercase__ : Optional[Any] = b * b - 4 * a * c lowercase__ : Optional[int] = (-b + sqrt(_lowerCamelCase )) / (2 * a) lowercase__ : int = (-b - sqrt(_lowerCamelCase )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def a_ ( ): '''simple docstring''' lowercase__ : List[str] = quadratic_roots(a=5 , b=6 , c=1 ) print(f"""The solutions are: {solutiona} and {solutiona}""" ) if __name__ == "__main__": main()
717
"""simple docstring""" def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Any = [0] * len(_lowerCAmelCase ) for i in range(1 , len(_lowerCAmelCase ) ): # use last results for better performance - dynamic programming lowercase__ : List[str] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: lowercase__ : Dict = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 lowercase__ : Union[str, Any] = j return prefix_result def a_ ( _lowerCAmelCase : str ): '''simple docstring''' return max(prefix_function(_lowerCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import sys _UpperCamelCase : List[Any] = os.path.join(os.path.dirname(__file__), "src") sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) _UpperCamelCase : str = [ "torch", "numpy", "tokenizers", "filelock", "requests", "tqdm", "regex", "sentencepiece", "sacremoses", "importlib_metadata", "huggingface_hub", ] @add_start_docstrings(AutoConfig.__doc__ ) def a_ ( *_lowerCAmelCase : Any , **_lowerCAmelCase : Union[str, Any] ): '''simple docstring''' return AutoConfig.from_pretrained(*a_ , **a_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a_ ( *_lowerCAmelCase : str , **_lowerCAmelCase : Union[str, Any] ): '''simple docstring''' return AutoTokenizer.from_pretrained(*a_ , **a_ ) @add_start_docstrings(AutoModel.__doc__ ) def a_ ( *_lowerCAmelCase : List[str] , **_lowerCAmelCase : int ): '''simple docstring''' return AutoModel.from_pretrained(*a_ , **a_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a_ ( *_lowerCAmelCase : Any , **_lowerCAmelCase : Tuple ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*a_ , **a_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a_ ( *_lowerCAmelCase : Dict , **_lowerCAmelCase : Optional[Any] ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*a_ , **a_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a_ ( *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*a_ , **a_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a_ ( *_lowerCAmelCase : List[str] , **_lowerCAmelCase : int ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*a_ , **a_ )
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]: lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0} lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} lowercase__ : Optional[int] = parent lowercase__ : Optional[int] = batch_size lowercase__ : str = num_channels lowercase__ : Any = image_size lowercase__ : Optional[Any] = min_resolution lowercase__ : int = max_resolution lowercase__ : List[Any] = do_resize lowercase__ : List[str] = size lowercase__ : str = do_center_crop lowercase__ : List[Any] = crop_size lowercase__ : Union[str, Any] = do_flip_channel_order def _UpperCAmelCase ( self ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Tuple = MobileViTImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_center_crop' ) ) self.assertTrue(hasattr(a , 'center_crop' ) ) self.assertTrue(hasattr(a , 'do_flip_channel_order' ) ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 2_0} ) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} ) lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> str: # Initialize image_processing lowercase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Tuple: # Initialize image_processing lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Dict: # Initialize image_processing lowercase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase : Optional[int] = logging.get_logger(__name__) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[int] ): '''simple docstring''' lowercase__ : Tuple = original_name.split('.' )[0] lowercase__ : Tuple = key.split('.' ) lowercase__ : Optional[Any] = int(key_list[key_list.index(_A ) - 2] ) lowercase__ : int = int(key_list[key_list.index(_A ) - 1] ) lowercase__ : Any = orig_block_num - offset lowercase__ : List[Any] = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" ) return key def a_ ( _lowerCAmelCase : Optional[int] ): '''simple docstring''' lowercase__ : List[Any] = OrderedDict() lowercase__ , lowercase__ : List[str] = 0, 0 for key, value in state_dict.items(): if key.startswith('network' ): lowercase__ : Optional[int] = key.replace('network' , 'poolformer.encoder' ) if "proj" in key: # Works for the first embedding as well as the internal embedding layers if key.endswith('bias' ) and "patch_embed" not in key: patch_emb_offset += 1 lowercase__ : str = key[: key.find('proj' )] lowercase__ : Union[str, Any] = key.replace(_A , f"""patch_embeddings.{total_embed_found}.""" ) lowercase__ : str = key.replace('proj' , 'projection' ) if key.endswith('bias' ): total_embed_found += 1 if "patch_embeddings" in key: lowercase__ : Tuple = 'poolformer.encoder.' 
+ key if "mlp.fc1" in key: lowercase__ : Dict = replace_key_with_offset(_A , _A , 'mlp.fc1' , 'output.conv1' ) if "mlp.fc2" in key: lowercase__ : Any = replace_key_with_offset(_A , _A , 'mlp.fc2' , 'output.conv2' ) if "norm1" in key: lowercase__ : int = replace_key_with_offset(_A , _A , 'norm1' , 'before_norm' ) if "norm2" in key: lowercase__ : Dict = replace_key_with_offset(_A , _A , 'norm2' , 'after_norm' ) if "layer_scale_1" in key: lowercase__ : Optional[int] = replace_key_with_offset(_A , _A , 'layer_scale_1' , 'layer_scale_1' ) if "layer_scale_2" in key: lowercase__ : Union[str, Any] = replace_key_with_offset(_A , _A , 'layer_scale_2' , 'layer_scale_2' ) if "head" in key: lowercase__ : Any = key.replace('head' , 'classifier' ) lowercase__ : Tuple = value return new_state_dict def a_ ( ): '''simple docstring''' lowercase__ : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase__ : int = Image.open(requests.get(_A , stream=_A ).raw ) return image @torch.no_grad() def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] ): '''simple docstring''' lowercase__ : Any = PoolFormerConfig() # set attributes based on model_name lowercase__ : Any = 'huggingface/label-files' lowercase__ : Optional[int] = model_name[-3:] lowercase__ : Optional[Any] = 1000 lowercase__ : Tuple = 'imagenet-1k-id2label.json' lowercase__ : Optional[Any] = (1, 1000) # set config attributes lowercase__ : Any = json.load(open(hf_hub_download(_A , _A , repo_type='dataset' ) , 'r' ) ) lowercase__ : Dict = {int(_A ): v for k, v in idalabel.items()} lowercase__ : Union[str, Any] = idalabel lowercase__ : List[str] = {v: k for k, v in idalabel.items()} if size == "s12": lowercase__ : List[str] = [2, 2, 6, 2] lowercase__ : Dict = [64, 128, 320, 512] lowercase__ : Any = 4.0 lowercase__ : Optional[Any] = 0.9 elif size == "s24": lowercase__ : Optional[Any] = [4, 4, 12, 4] lowercase__ : List[Any] = [64, 128, 320, 512] lowercase__ : Optional[int] = 4.0 lowercase__ : int = 0.9 elif size == "s36": lowercase__ : Dict = [6, 6, 18, 6] lowercase__ : Optional[Any] = [64, 128, 320, 512] lowercase__ : int = 4.0 lowercase__ : str = 1E-6 lowercase__ : Any = 0.9 elif size == "m36": lowercase__ : List[Any] = [6, 6, 18, 6] lowercase__ : List[str] = [96, 192, 384, 768] lowercase__ : Union[str, Any] = 4.0 lowercase__ : int = 1E-6 lowercase__ : Union[str, Any] = 0.9_5 elif size == "m48": lowercase__ : str = [8, 8, 24, 8] lowercase__ : Union[str, Any] = [96, 192, 384, 768] lowercase__ : Optional[Any] = 4.0 lowercase__ : Any = 1E-6 lowercase__ : Dict = 0.9_5 else: raise ValueError(f"""Size {size} not supported""" ) # load image processor lowercase__ : Optional[int] = PoolFormerImageProcessor(crop_pct=_A ) # Prepare image lowercase__ : Any = prepare_img() lowercase__ : Union[str, Any] = image_processor(images=_A , return_tensors='pt' ).pixel_values logger.info(f"""Converting model {model_name}...""" ) # load original state dict lowercase__ : Dict = torch.load(_A , map_location=torch.device('cpu' ) ) # rename keys lowercase__ : Dict = rename_keys(_A ) # create HuggingFace model and load state dict lowercase__ : Dict = PoolFormerForImageClassification(_A ) model.load_state_dict(_A ) model.eval() # Define image processor lowercase__ : Union[str, Any] = PoolFormerImageProcessor(crop_pct=_A ) lowercase__ : Any = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values # forward pass lowercase__ : Tuple = model(_A ) lowercase__ : Any = outputs.logits # define expected logit slices for 
different models if size == "s12": lowercase__ : Any = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] ) elif size == "s24": lowercase__ : Tuple = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] ) elif size == "s36": lowercase__ : List[str] = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] ) elif size == "m36": lowercase__ : Tuple = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] ) elif size == "m48": lowercase__ : Tuple = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] ) else: raise ValueError(f"""Size {size} not supported""" ) # verify logits assert logits.shape == expected_shape assert torch.allclose(logits[0, :3] , _A , atol=1E-2 ) # finally, save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_A ).mkdir(exist_ok=_A ) model.save_pretrained(_A ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_A ) if __name__ == "__main__": _UpperCamelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument( "--model_name", default="poolformer_s12", type=str, help="Name of the model you\'d like to convert.", ) parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) _UpperCamelCase : Union[str, Any] = parser.parse_args() convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict: lowercase__ : Optional[Any] = parent lowercase__ : Dict = batch_size lowercase__ : List[Any] = seq_length lowercase__ : int = is_training lowercase__ : str = use_attention_mask lowercase__ : Dict = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : Tuple = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : int = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Dict = hidden_dropout_prob lowercase__ : Tuple = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : int = type_vocab_size lowercase__ : List[str] = type_sequence_label_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : Optional[int] = num_choices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_attention_mask: lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : List[str] = None if self.use_token_type_ids: lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : Any = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCAmelCase ( self ) -> Any: lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self ) @slow def _UpperCAmelCase ( self ) -> str: for model_class_name in self.all_model_classes: lowercase__ : str = 
model_class_name.from_pretrained('albert-base-v2' ) lowercase__ : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(a ) @require_flax class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' ) lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase__ : Any = model(a , attention_mask=a )[0] lowercase__ : Tuple = (1, 1_1, 7_6_8) self.assertEqual(output.shape , a ) lowercase__ : Optional[Any] = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
"""simple docstring""" def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ): '''simple docstring''' lowercase__ : Optional[Any] = (boundary[1] - boundary[0]) / steps lowercase__ : Optional[int] = boundary[0] lowercase__ : int = boundary[1] lowercase__ : List[str] = make_points(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Optional[int] = 0.0 y += (h / 2.0) * f(_lowerCAmelCase ) for i in x_i: # print(i) y += h * f(_lowerCAmelCase ) y += (h / 2.0) * f(_lowerCAmelCase ) return y def a_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any ): '''simple docstring''' lowercase__ : List[Any] = a + h while x < (b - h): yield x lowercase__ : Tuple = x + h def a_ ( _lowerCAmelCase : Union[str, Any] ): # enter your function here '''simple docstring''' lowercase__ : Any = (x - 0) * (x - 0) return y def a_ ( ): '''simple docstring''' lowercase__ : Union[str, Any] = 0.0 # Lower bound of integration lowercase__ : List[Any] = 1.0 # Upper bound of integration lowercase__ : str = 1_0.0 # define number of steps or resolution lowercase__ : Dict = [a, b] # define boundary of integration lowercase__ : int = method_a(_lowerCAmelCase , _lowerCAmelCase ) print(f"""y = {y}""" ) if __name__ == "__main__": main()
"""simple docstring""" from collections.abc import Sequence def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) ) def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' lowercase__ : int = 0.0 for coeff in reversed(_lowerCAmelCase ): lowercase__ : List[Any] = result * x + coeff return result if __name__ == "__main__": _UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0) _UpperCamelCase : Dict = 1_0.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) _UpperCamelCase : Any = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Optional[int] = ['''ViTFeatureExtractor'''] _UpperCamelCase : str = ['''ViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : int = [ '''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ViTForImageClassification''', '''ViTForMaskedImageModeling''', '''ViTModel''', '''ViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Any = [ '''TFViTForImageClassification''', '''TFViTModel''', '''TFViTPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : List[str] = [ '''FlaxViTForImageClassification''', '''FlaxViTModel''', '''FlaxViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys _UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _UpperCamelCase : Any = [ {"dataset": "wikipedia", "config_name": "20220301.de"}, {"dataset": "wikipedia", "config_name": "20220301.en"}, {"dataset": "wikipedia", "config_name": "20220301.fr"}, {"dataset": "wikipedia", "config_name": "20220301.frr"}, {"dataset": "wikipedia", "config_name": "20220301.it"}, {"dataset": "wikipedia", "config_name": "20220301.simple"}, {"dataset": "snli", "config_name": "plain_text"}, {"dataset": "eli5", "config_name": "LFQA_reddit"}, {"dataset": "wiki40b", "config_name": "en"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, {"dataset": "natural_questions", "config_name": "default"}, ] def a_ ( _lowerCAmelCase : Optional[Any]=True ): '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a)) class UpperCAmelCase_ ( _a): lowerCamelCase__ : str = None lowerCamelCase__ : Optional[Any] = None def _UpperCAmelCase ( self , a , a ) -> List[Any]: with TemporaryDirectory() as tmp_dir: lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a ) lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=a , config_name=a , hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a ) self.assertTrue(os.path.exists(a ) ) @pytest.mark.integration def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : Optional[int] = import_main_class(dataset_module.module_path ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam lowercase__ : Optional[int] = None builder_instance.download_and_prepare() lowercase__ : Optional[int] = builder_instance.as_dataset() assert ds @pytest.mark.integration def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , 
hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset() assert ds assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert "train" in ds assert isinstance(ds['train'] , _lowerCAmelCase ) assert next(iter(ds['train'] ) )
"""simple docstring""" def a_ ( _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Any = abs(snake_case__ ) lowercase__ : Optional[Any] = 0 while n > 0: res += n % 10 n //= 10 return res def a_ ( _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : List[str] = abs(snake_case__ ) return n if n < 10 else n % 10 + sum_of_digits(n // 10 ) def a_ ( _lowerCAmelCase : int ): '''simple docstring''' return sum(int(snake_case__ ) for c in str(abs(snake_case__ ) ) ) def a_ ( ): '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(_lowerCAmelCase : Callable , _lowerCAmelCase : int ) -> None: lowercase__ : str = f"""{func.__name__}({value})""" lowercase__ : Any = timeit(f"""__main__.{call}""" , setup='import __main__' ) print(f"""{call:56} = {func(snake_case__ )} -- {timing:.4f} seconds""" ) for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(snake_case__ , snake_case__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def a_ ( _lowerCAmelCase : dict ): '''simple docstring''' return (data["data"], data["target"]) def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ): '''simple docstring''' lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(_lowerCAmelCase , _lowerCAmelCase ) # Predict target for test data lowercase__ : str = xgb.predict(_lowerCAmelCase ) lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 ) return predictions def a_ ( ): '''simple docstring''' lowercase__ : Optional[Any] = fetch_california_housing() lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split( _lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 ) lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Error printing print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
"""simple docstring""" import os def a_ ( ): '''simple docstring''' lowercase__ : Union[str, Any] = os.path.dirname(os.path.realpath(__SCREAMING_SNAKE_CASE ) ) lowercase__ : Optional[int] = os.path.join(__SCREAMING_SNAKE_CASE , 'triangle.txt' ) with open(__SCREAMING_SNAKE_CASE ) as f: lowercase__ : List[str] = f.readlines() lowercase__ : List[str] = [] for line in triangle: lowercase__ : int = [] for number in line.strip().split(' ' ): numbers_from_line.append(int(__SCREAMING_SNAKE_CASE ) ) a.append(__SCREAMING_SNAKE_CASE ) for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ): for j in range(len(a[i] ) ): lowercase__ : Optional[int] = a[i - 1][j] if j != len(a[i - 1] ) else 0 lowercase__ : Optional[int] = a[i - 1][j - 1] if j > 0 else 0 a[i][j] += max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return max(a[-1] ) if __name__ == "__main__": print(solution())
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]: lowercase__ : str = parent lowercase__ : int = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Dict = patch_size lowercase__ : Tuple = tubelet_size lowercase__ : Optional[int] = num_frames lowercase__ : Optional[int] = is_training lowercase__ : int = use_labels lowercase__ : Optional[int] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : Any = intermediate_size lowercase__ : str = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Union[str, Any] = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = mask_ratio lowercase__ : Optional[Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase__ : Optional[Any] = (image_size // patch_size) ** 2 lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase__ : str = int(mask_ratio * self.seq_length ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase__ : int = None if self.use_labels: lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Dict = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Tuple: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : Dict = VideoMAEModel(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( 
self , a , a , a ) -> Union[str, Any]: lowercase__ : str = VideoMAEForPreTraining(a ) model.to(a ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Any = torch.ones((self.num_masks,) ) lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool() lowercase__ : str = model(a , a ) # model only returns predictions for masked patches lowercase__ : str = mask.sum().item() lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowerCamelCase__ : Optional[int] = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) lowerCamelCase__ : Any = False lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : str = False def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = VideoMAEModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]: lowercase__ : Union[str, Any] = copy.deepcopy(a ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) ) lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase__ : Union[str, Any] = bool_masked_pos.to(a ) if return_labels: if model_class in [ *get_values(a ), ]: lowercase__ : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a ) return inputs_dict def _UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) lowercase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : int = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a ) self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> Optional[Any]: if not self.has_attentions: pass else: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = True for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Any = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase__ : Optional[Any] = True lowercase__ : int = False lowercase__ : Any = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Dict = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : List[str] = len(a ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : List[str] = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) ) self.assertEqual(out_len + 1 , len(a ) ) lowercase__ : int = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> Optional[int]: def check_hidden_states_output(a , a , a ): lowercase__ : Optional[int] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(a ) , a ) lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Union[str, Any] = True 
check_hidden_states_output(a , a , a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> List[Any]: pass def a_ ( ): '''simple docstring''' lowercase__ : int = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) lowercase__ : str = np.load(_lowerCAmelCase ) return list(_lowerCAmelCase ) @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Optional[Any]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to( a ) lowercase__ : str = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : Union[str, Any] = model(**a ) # verify the logits lowercase__ : str = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a ) lowercase__ : Optional[Any] = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a ) # add boolean mask, indicating which patches to mask lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) lowercase__ : str = torch.load(a ) # forward pass with torch.no_grad(): lowercase__ : List[Any] = model(**a ) # verify the logits lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] ) lowercase__ : List[str] = torch.tensor( [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a ) self.assertEqual(outputs.logits.shape , a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to( a ) with torch.no_grad(): lowercase__ : Any = model(**a ) lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
"""simple docstring""" import warnings from typing import Dict import numpy as np from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING def a_ ( _lowerCAmelCase : Optional[int] ): '''simple docstring''' return 1.0 / (1.0 + np.exp(-_outputs )) def a_ ( _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' lowercase__ : int = np.max(_outputs , axis=-1 , keepdims=_lowerCAmelCase ) lowercase__ : List[Any] = np.exp(_outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=_lowerCAmelCase ) class UpperCAmelCase_ ( _a): lowerCamelCase__ : Tuple = "sigmoid" lowerCamelCase__ : int = "softmax" lowerCamelCase__ : Union[str, Any] = "none" @add_end_docstrings( _a , R"\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n " , ) class UpperCAmelCase_ ( _a): lowerCamelCase__ : str = False lowerCamelCase__ : Optional[Any] = ClassificationFunction.NONE def __init__( self , **a ) -> List[str]: super().__init__(**a ) self.check_model_type( TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ) def _UpperCAmelCase ( self , a=None , a=None , a="" , **a ) -> Optional[Any]: # Using "" as default argument because we're going to use `top_k=None` in user code to declare # "No top_k" lowercase__ : Optional[int] = tokenizer_kwargs lowercase__ : Any = {} if hasattr(self.model.config , 'return_all_scores' ) and return_all_scores is None: lowercase__ : Union[str, Any] = self.model.config.return_all_scores if isinstance(a , a ) or top_k is None: lowercase__ : str = top_k lowercase__ : str = False elif return_all_scores is not None: warnings.warn( '`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of' ' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.' , a , ) if return_all_scores: lowercase__ : int = None else: lowercase__ : List[str] = 1 if isinstance(a , a ): lowercase__ : List[Any] = ClassificationFunction[function_to_apply.upper()] if function_to_apply is not None: lowercase__ : Any = function_to_apply return preprocess_params, {}, postprocess_params def __call__( self , *a , **a ) -> Dict: lowercase__ : Tuple = super().__call__(*a , **a ) # TODO try and retrieve it in a nicer way from _sanitize_parameters. 
lowercase__ : Optional[int] = 'top_k' not in kwargs if isinstance(args[0] , a ) and _legacy: # This pipeline is odd, and return a list when single item is run return [result] else: return result def _UpperCAmelCase ( self , a , **a ) -> Dict[str, GenericTensor]: lowercase__ : str = self.framework if isinstance(a , a ): return self.tokenizer(**a , return_tensors=a , **a ) elif isinstance(a , a ) and len(a ) == 1 and isinstance(inputs[0] , a ) and len(inputs[0] ) == 2: # It used to be valid to use a list of list of list for text pairs, keeping this path for BC return self.tokenizer( text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=a , **a ) elif isinstance(a , a ): # This is likely an invalid usage of the pipeline attempting to pass text pairs. raise ValueError( 'The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a' ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.' ) return self.tokenizer(a , return_tensors=a , **a ) def _UpperCAmelCase ( self , a ) -> List[Any]: return self.model(**a ) def _UpperCAmelCase ( self , a , a=None , a=1 , a=True ) -> List[Any]: # `_legacy` is used to determine if we're running the naked pipeline and in backward # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running # the more natural result containing the list. # Default value before `set_parameters` if function_to_apply is None: if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1: lowercase__ : List[Any] = ClassificationFunction.SIGMOID elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1: lowercase__ : Optional[int] = ClassificationFunction.SOFTMAX elif hasattr(self.model.config , 'function_to_apply' ) and function_to_apply is None: lowercase__ : Tuple = self.model.config.function_to_apply else: lowercase__ : List[str] = ClassificationFunction.NONE lowercase__ : Any = model_outputs['logits'][0] lowercase__ : List[str] = outputs.numpy() if function_to_apply == ClassificationFunction.SIGMOID: lowercase__ : Tuple = sigmoid(a ) elif function_to_apply == ClassificationFunction.SOFTMAX: lowercase__ : Optional[Any] = softmax(a ) elif function_to_apply == ClassificationFunction.NONE: lowercase__ : Any = outputs else: raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" ) if top_k == 1 and _legacy: return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()} lowercase__ : Optional[int] = [ {'label': self.model.config.idalabel[i], 'score': score.item()} for i, score in enumerate(a ) ] if not _legacy: dict_scores.sort(key=lambda a : x["score"] , reverse=a ) if top_k is not None: lowercase__ : List[Any] = dict_scores[:top_k] return dict_scores
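For intuition about the two activation helpers defined at the top of this file, here is what they produce for a toy logit row (a standalone illustration, not part of the pipeline):

import numpy as np

logits = np.array([[2.0, 1.0, 0.1]])

# softmax: shift by the row max for numerical stability, then normalize
maxes = np.max(logits, axis=-1, keepdims=True)
shifted_exp = np.exp(logits - maxes)
print(shifted_exp / shifted_exp.sum(axis=-1, keepdims=True))  # each row sums to 1

# sigmoid: independent per-label scores, used for multi-label problems
print(1.0 / (1.0 + np.exp(-logits)))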
"""simple docstring""" import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } _UpperCamelCase : List[str] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): '''simple docstring''' for attribute in key.split('.' ): lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: lowercase__ : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase__ : Optional[Any] = value elif weight_type == "weight_g": lowercase__ : Dict = value elif weight_type == "weight_v": lowercase__ : List[str] = value elif weight_type == "bias": lowercase__ : Optional[Any] = value else: lowercase__ : List[str] = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Tuple = [] lowercase__ : List[str] = fairseq_model.state_dict() lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) lowercase__ : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue lowercase__ : int = True if "*" in mapped_key: lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' 
)[-2] lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase ) if "weight_g" in name: lowercase__ : List[Any] = 'weight_g' elif "weight_v" in name: lowercase__ : int = 'weight_v' elif "bias" in name: lowercase__ : Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ : Union[str, Any] = 'weight' else: lowercase__ : int = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : int = full_name.split('conv_layers.' )[-1] lowercase__ : int = name.split('.' ) lowercase__ : int = int(items[0] ) lowercase__ : Dict = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase__ : Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase__ : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) lowercase__ : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase__ : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ): '''simple docstring''' if config_path is not None: lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase ) else: lowercase__ : Any = UniSpeechSatConfig() lowercase__ : Union[str, Any] = '' if is_finetuned: lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase ) else: lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) lowercase__ : Union[str, Any] = model[0].eval() recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) if 
__name__ == "__main__": _UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCamelCase : str = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
"""simple docstring""" from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class UpperCAmelCase_ ( UpperCamelCase_): @slow @require_torch def _UpperCAmelCase ( self ) -> Any: lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' , 'prajjwal1/bert-tiny' ) lowercase__ : Optional[Any] = BertTokenizer.from_pretrained('bert-base-uncased' ) lowercase__ : Any = bertabert.config.encoder.vocab_size lowercase__ : List[str] = tokenizer.sep_token_id lowercase__ : Any = tokenizer.cls_token_id lowercase__ : Tuple = 1_2_8 lowercase__ : str = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='train[:1%]' ) lowercase__ : List[str] = datasets.load_dataset('cnn_dailymail' , '3.0.0' , split='validation[:1%]' ) lowercase__ : Optional[int] = train_dataset.select(range(3_2 ) ) lowercase__ : Tuple = val_dataset.select(range(1_6 ) ) lowercase__ : List[str] = 4 def _map_to_encoder_decoder_inputs(a ): # Tokenizer will automatically set [BOS] <text> [EOS] lowercase__ : Dict = tokenizer(batch['article'] , padding='max_length' , truncation=__A , max_length=5_1_2 ) lowercase__ : List[str] = tokenizer(batch['highlights'] , padding='max_length' , truncation=__A , max_length=1_2_8 ) lowercase__ : str = inputs.input_ids lowercase__ : int = inputs.attention_mask lowercase__ : Tuple = outputs.input_ids lowercase__ : Dict = outputs.input_ids.copy() lowercase__ : Optional[Any] = [ [-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"] ] lowercase__ : List[Any] = outputs.attention_mask assert all(len(__A ) == 5_1_2 for x in inputs.input_ids ) assert all(len(__A ) == 1_2_8 for x in outputs.input_ids ) return batch def _compute_metrics(a ): lowercase__ : Dict = pred.label_ids lowercase__ : Any = pred.predictions # all unnecessary tokens are removed lowercase__ : str = tokenizer.batch_decode(__A , skip_special_tokens=__A ) lowercase__ : Any = tokenizer.batch_decode(__A , skip_special_tokens=__A ) lowercase__ : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__A ) )] ) / len(__A ) return {"accuracy": accuracy} # map train dataset lowercase__ : Tuple = train_dataset.map( _map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=['article', 'highlights'] , ) train_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) # same for validation dataset lowercase__ : int = val_dataset.map( _map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=['article', 'highlights'] , ) val_dataset.set_format( type='torch' , columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] , ) lowercase__ : Dict = self.get_auto_remove_tmp_dir() lowercase__ : List[str] = SeqaSeqTrainingArguments( output_dir=__A , per_device_train_batch_size=__A , per_device_eval_batch_size=__A , predict_with_generate=__A , evaluation_strategy='steps' , do_train=__A , do_eval=__A , warmup_steps=0 , eval_steps=2 , logging_steps=2 , ) # instantiate trainer lowercase__ : Dict = SeqaSeqTrainer( model=__A , args=__A , compute_metrics=_compute_metrics , train_dataset=__A , eval_dataset=__A , tokenizer=__A , ) # start training trainer.train()
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int: lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : Dict = image_size lowercase__ : str = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : List[str] = embed_dim lowercase__ : Any = depths lowercase__ : Dict = num_heads lowercase__ : List[str] = window_size lowercase__ : int = mlp_ratio lowercase__ : Tuple = qkv_bias lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Tuple = drop_path_rate lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = use_absolute_embeddings lowercase__ : Optional[Any] = patch_norm lowercase__ : Any = layer_norm_eps lowercase__ : List[Any] = initializer_range lowercase__ : List[str] = is_training lowercase__ : int = scope lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = type_sequence_label_size lowercase__ : List[str] = encoder_stride lowercase__ : Optional[Any] = out_features lowercase__ : Dict = out_indices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _UpperCAmelCase ( self , a , a , a ) -> Dict: lowercase__ : Tuple = MaskFormerSwinModel(config=a ) model.to(a ) model.eval() lowercase__ : str = model(a ) lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a ) model.to(a ) model.eval() lowercase__ : int = model(a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(a ): lowercase__ : Dict = ['stem'] lowercase__ : List[str] = MaskFormerSwinBackbone(config=a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase__ : str = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Any = False lowerCamelCase__ : Dict = False lowerCamelCase__ : int = False def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = MaskFormerSwinModelTester(self ) lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def _UpperCAmelCase ( self ) -> Optional[int]: pass def _UpperCAmelCase ( self ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCAmelCase ( self ) -> str: return def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a ) @unittest.skip('Swin does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: pass @unittest.skip('Swin does not support feedforward chunking' ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> str: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) lowercase__ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => 
so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def _UpperCAmelCase ( self ) -> List[Any]: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def _UpperCAmelCase ( self ) -> int: pass def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple: lowercase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[Any] = outputs.hidden_states lowercase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swin has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any] = 3 lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : int = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass def _UpperCAmelCase ( self ) -> Any: lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(a ): lowercase__ : Union[str, Any] = 0 return t def check_equivalence(a , a , a , a={} ): 
with torch.no_grad(): lowercase__ : Optional[Any] = model(**a , return_dict=a , **a ) lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple() def recursive_check(a , a ): if isinstance(a , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a , a ): recursive_check(a , a ) elif isinstance(a , a ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(a , a ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has""" f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}.""" ) , ) recursive_check(a , a ) for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) model.to(a ) model.eval() lowercase__ : Tuple = self._prepare_for_class(a , a ) lowercase__ : Optional[Any] = self._prepare_for_class(a , a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a ) lowercase__ : int = self._prepare_for_class(a , a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ ( unittest.TestCase , _a): lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Optional[int] = MaskFormerSwinModelTester(self ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: lowercase__ : Optional[Any] = backbone_class(a ) backbone.to(a ) backbone.eval() lowercase__ : Union[str, Any] = backbone(**a ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , a ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowercase__ : List[str] = backbone(**a , output_hidden_states=a ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowercase__ : List[Any] = backbone(**a , output_attentions=a ) self.assertIsNotNone(outputs.attentions )
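# A minimal sketch (hedged) of the backbone API exercised by
# `create_and_check_backbone` above; kept in comments, with illustrative values.
#
#   import torch
#   from transformers import MaskFormerSwinConfig, MaskFormerSwinBackbone
#
#   config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
#   backbone = MaskFormerSwinBackbone(config).eval()
#   pixel_values = torch.randn(1, 3, config.image_size, config.image_size)
#   features = backbone(pixel_values).feature_maps  # one map per requested stage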
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Any, Callable, Dict, List, Optional, Union import pandas as pd import pyarrow as pa import datasets import datasets.config from datasets.features.features import require_storage_cast from datasets.table import table_cast from datasets.utils.py_utils import Literal _UpperCamelCase : Any = datasets.utils.logging.get_logger(__name__) _UpperCamelCase : Tuple = ["names", "prefix"] _UpperCamelCase : str = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"] _UpperCamelCase : List[Any] = ["encoding_errors", "on_bad_lines"] _UpperCamelCase : List[Any] = ["date_format"] @dataclass class UpperCAmelCase_ ( datasets.BuilderConfig): lowerCamelCase__ : Tuple = "," lowerCamelCase__ : Optional[int] = None lowerCamelCase__ : Union[str, Any] = "infer" lowerCamelCase__ : Tuple = None lowerCamelCase__ : Tuple = None lowerCamelCase__ : Any = None lowerCamelCase__ : int = None lowerCamelCase__ : Any = None lowerCamelCase__ : int = True lowerCamelCase__ : int = None lowerCamelCase__ : Optional[int] = None lowerCamelCase__ : List[Any] = None lowerCamelCase__ : Optional[Any] = None lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : List[str] = None lowerCamelCase__ : str = None lowerCamelCase__ : int = None lowerCamelCase__ : List[str] = True lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : List[Any] = False lowerCamelCase__ : List[Any] = True lowerCamelCase__ : Any = None lowerCamelCase__ : Dict = "." lowerCamelCase__ : str = None lowerCamelCase__ : Optional[Any] = "\"" lowerCamelCase__ : List[Any] = 0 lowerCamelCase__ : Dict = None lowerCamelCase__ : Dict = None lowerCamelCase__ : Union[str, Any] = None lowerCamelCase__ : Union[str, Any] = None lowerCamelCase__ : Any = True lowerCamelCase__ : Any = True lowerCamelCase__ : List[Any] = 0 lowerCamelCase__ : Optional[Any] = True lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : str = None lowerCamelCase__ : List[str] = 1_0_0_0_0 lowerCamelCase__ : Dict = None lowerCamelCase__ : Optional[Any] = "strict" lowerCamelCase__ : Optional[int] = "error" lowerCamelCase__ : Optional[Any] = None def _UpperCAmelCase ( self ) -> int: if self.delimiter is not None: lowercase__ : Optional[Any] = self.delimiter if self.column_names is not None: lowercase__ : int = self.column_names @property def _UpperCAmelCase ( self ) -> Any: lowercase__ : Union[str, Any] = { "sep": self.sep, "header": self.header, "names": self.names, "index_col": self.index_col, "usecols": self.usecols, "prefix": self.prefix, "mangle_dupe_cols": self.mangle_dupe_cols, "engine": self.engine, "converters": self.converters, "true_values": self.true_values, "false_values": self.false_values, "skipinitialspace": self.skipinitialspace, "skiprows": self.skiprows, "nrows": self.nrows, "na_values": self.na_values, "keep_default_na": self.keep_default_na, "na_filter": self.na_filter, "verbose": self.verbose, "skip_blank_lines": self.skip_blank_lines, "thousands": self.thousands, "decimal": self.decimal, "lineterminator": self.lineterminator, "quotechar": self.quotechar, "quoting": self.quoting, "escapechar": self.escapechar, "comment": self.comment, "encoding": self.encoding, "dialect": self.dialect, "error_bad_lines": self.error_bad_lines, "warn_bad_lines": self.warn_bad_lines, "skipfooter": self.skipfooter, "doublequote": self.doublequote, "memory_map": self.memory_map, "float_precision": self.float_precision, "chunksize": self.chunksize, "encoding_errors": self.encoding_errors, 
"on_bad_lines": self.on_bad_lines, "date_format": self.date_format, } # some kwargs must not be passed if they don't have a default value # some others are deprecated and we can also not pass them if they are the default value for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS: if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , a ): del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 2.0 new arguments if not (datasets.config.PANDAS_VERSION.major >= 2): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] # Remove 1.3 new arguments if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3): for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS: del pd_read_csv_kwargs[pd_read_csv_parameter] return pd_read_csv_kwargs class UpperCAmelCase_ ( datasets.ArrowBasedBuilder): lowerCamelCase__ : List[Any] = CsvConfig def _UpperCAmelCase ( self ) -> int: return datasets.DatasetInfo(features=self.config.features ) def _UpperCAmelCase ( self , a ) -> int: if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowercase__ : Optional[Any] = dl_manager.download_and_extract(self.config.data_files ) if isinstance(a , (str, list, tuple) ): lowercase__ : List[str] = data_files if isinstance(a , a ): lowercase__ : str = [files] lowercase__ : Union[str, Any] = [dl_manager.iter_files(a ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )] lowercase__ : str = [] for split_name, files in data_files.items(): if isinstance(a , a ): lowercase__ : Any = [files] lowercase__ : int = [dl_manager.iter_files(a ) for file in files] splits.append(datasets.SplitGenerator(name=a , gen_kwargs={'files': files} ) ) return splits def _UpperCAmelCase ( self , a ) -> pa.Table: if self.config.features is not None: lowercase__ : Tuple = self.config.features.arrow_schema if all(not require_storage_cast(a ) for feature in self.config.features.values() ): # cheaper cast lowercase__ : int = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=a ) else: # more expensive cast; allows str <-> int/float or str to Audio for example lowercase__ : int = table_cast(a , a ) return pa_table def _UpperCAmelCase ( self , a ) -> Union[str, Any]: lowercase__ : List[str] = self.config.features.arrow_schema if self.config.features else None # dtype allows reading an int column as str lowercase__ : Optional[int] = ( { name: dtype.to_pandas_dtype() if not require_storage_cast(a ) else object for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() ) } if schema is not None else None ) for file_idx, file in enumerate(itertools.chain.from_iterable(a ) ): lowercase__ : List[Any] = pd.read_csv(a , iterator=a , dtype=a , **self.config.pd_read_csv_kwargs ) try: for batch_idx, df in enumerate(a ): lowercase__ : List[str] = pa.Table.from_pandas(a ) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield (file_idx, batch_idx), self._cast_table(a ) except ValueError as e: logger.error(f"""Failed to read file '{file}' with error {type(a )}: {e}""" ) raise
"""simple docstring""" import math def a_ ( _lowerCAmelCase : int = 100 ): '''simple docstring''' lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) ) lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f'''{solution() = }''')
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _UpperCamelCase : Optional[int] = { "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } _UpperCamelCase : List[Any] = { "squeezebert/squeezebert-uncased": 5_12, "squeezebert/squeezebert-mnli": 5_12, "squeezebert/squeezebert-mnli-headless": 5_12, } _UpperCamelCase : List[str] = { "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE): lowerCamelCase__ : Dict = VOCAB_FILES_NAMES lowerCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ : Dict = PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ : Optional[Any] = SqueezeBertTokenizer def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ) -> Any: super().__init__( _a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , ) lowercase__ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _a ) != do_lower_case or normalizer_state.get('strip_accents' , _a ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _a ) != tokenize_chinese_chars ): lowercase__ : List[Any] = getattr(_a , normalizer_state.pop('type' ) ) lowercase__ : Union[str, Any] = do_lower_case lowercase__ : List[str] = strip_accents lowercase__ : Dict = tokenize_chinese_chars lowercase__ : List[str] = normalizer_class(**_a ) lowercase__ : Any = do_lower_case def _UpperCAmelCase ( self , a , a=None ) -> Dict: lowercase__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCAmelCase ( self , a , a = None ) -> List[Any]: lowercase__ : Dict = [self.sep_token_id] lowercase__ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self , a , a = None ) -> List[str]: lowercase__ : Union[str, Any] = self._tokenizer.model.save(_a , name=_a ) return tuple(_a )
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def _UpperCAmelCase ( self ) -> Tuple: lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : List[Any] = controlnet_params lowercase__ : int = 'bird' lowercase__ : List[Any] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples ) lowercase__ : List[Any] = jax.random.PRNGKey(0 ) lowercase__ : Tuple = jax.random.split(a , jax.device_count() ) lowercase__ : str = replicate(a ) lowercase__ : List[str] = shard(a ) lowercase__ : Dict = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : Optional[Any] = jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> List[str]: lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : Optional[Any] = controlnet_params lowercase__ : List[Any] = 'Chef in the kitchen' lowercase__ : List[str] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) lowercase__ : List[str] = jax.random.PRNGKey(0 ) lowercase__ : str = jax.random.split(a , jax.device_count() ) lowercase__ : Optional[Any] = replicate(a ) lowercase__ : Optional[Any] = shard(a ) lowercase__ : List[Any] = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + 
images.shape[-3:] ) lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : str = jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
"""simple docstring""" import os import sys import unittest _UpperCamelCase : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path _UpperCamelCase : Union[str, Any] = os.path.join(git_repo_path, "src", "diffusers") class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> Any: lowercase__ : Optional[int] = find_backend(' if not is_torch_available():' ) self.assertEqual(a_ , 'torch' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") lowercase__ : Any = find_backend(' if not (is_torch_available() and is_transformers_available()):' ) self.assertEqual(a_ , 'torch_and_transformers' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") lowercase__ : Union[str, Any] = find_backend( ' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):' ) self.assertEqual(a_ , 'torch_and_transformers_and_onnx' ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Dict = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('torch' , a_ ) self.assertIn('torch_and_transformers' , a_ ) self.assertIn('flax_and_transformers' , a_ ) self.assertIn('torch_and_transformers_and_onnx' , a_ ) # Likewise, we can't assert on the exact content of a key self.assertIn('UNet2DModel' , objects['torch'] ) self.assertIn('FlaxUNet2DConditionModel' , objects['flax'] ) self.assertIn('StableDiffusionPipeline' , objects['torch_and_transformers'] ) self.assertIn('FlaxStableDiffusionPipeline' , objects['flax_and_transformers'] ) self.assertIn('LMSDiscreteScheduler' , objects['torch_and_scipy'] ) self.assertIn('OnnxStableDiffusionPipeline' , objects['torch_and_transformers_and_onnx'] ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = create_dummy_object('CONSTANT' , '\'torch\'' ) self.assertEqual(a_ , '\nCONSTANT = None\n' ) lowercase__ : Optional[int] = create_dummy_object('function' , '\'torch\'' ) self.assertEqual( a_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' ) lowercase__ : List[Any] = """ class FakeClass(metaclass=DummyObject): _backends = 'torch' def __init__(self, *args, **kwargs): requires_backends(self, 'torch') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, 'torch') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, 'torch') """ lowercase__ : Union[str, Any] = create_dummy_object('FakeClass' , '\'torch\'' ) self.assertEqual(a_ , a_ ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Union[str, Any] = """# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, [\"torch\"]) """ lowercase__ : List[Any] = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} ) self.assertEqual(dummy_files['torch'] , a_ )
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Union[str, Any] = tempfile.mkdtemp() lowercase__ : Tuple = BlipImageProcessor() lowercase__ : Optional[int] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' ) lowercase__ : str = BlipaProcessor(__lowerCAmelCase , __lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self , **a ) -> List[Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).tokenizer def _UpperCAmelCase ( self , **a ) -> Union[str, Any]: return AutoProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase ).image_processor def _UpperCAmelCase ( self ) -> Dict: shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[int] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] lowercase__ : List[Any] = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> int: lowercase__ : str = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase__ : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) lowercase__ : Any = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 ) lowercase__ : Optional[Any] = BlipaProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__lowerCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __lowerCAmelCase ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Any = self.get_tokenizer() lowercase__ : Dict = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase__ : Optional[Any] = self.prepare_image_inputs() lowercase__ : Tuple = image_processor(__lowerCAmelCase , return_tensors='np' ) lowercase__ : List[str] = processor(images=__lowerCAmelCase , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Optional[Any] = self.get_image_processor() lowercase__ : Dict = self.get_tokenizer() lowercase__ : Any = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase__ : Optional[Any] = 'lower newer' lowercase__ : Optional[int] = processor(text=__lowerCAmelCase ) lowercase__ : Union[str, Any] = tokenizer(__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Optional[int] = 
self.get_tokenizer() lowercase__ : List[str] = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase__ : Any = 'lower newer' lowercase__ : Optional[Any] = self.prepare_image_inputs() lowercase__ : Tuple = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] ) # test if it raises when no input is passed with pytest.raises(__lowerCAmelCase ): processor() def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : int = self.get_image_processor() lowercase__ : Tuple = self.get_tokenizer() lowercase__ : str = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase__ : int = processor.batch_decode(__lowerCAmelCase ) lowercase__ : Optional[Any] = tokenizer.batch_decode(__lowerCAmelCase ) self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : int = self.get_image_processor() lowercase__ : Union[str, Any] = self.get_tokenizer() lowercase__ : Tuple = BlipaProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase ) lowercase__ : Dict = 'lower newer' lowercase__ : Optional[int] = self.prepare_image_inputs() lowercase__ : Dict = processor(text=__lowerCAmelCase , images=__lowerCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
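# Usage sketch (hedged): outside these tiny test fixtures, the equivalent
# processor is loaded from a published checkpoint. The Hub id below is a real
# BLIP-2 checkpoint but is not referenced by this test; `image` is a placeholder.
#
#   from transformers import AutoProcessor
#
#   processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")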
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) model.to(a ) from datasets import load_dataset lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' ) lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' ) lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : List[str] = model(**a ) lowercase__ : List[Any] = outputs.logits lowercase__ : Union[str, Any] = torch.Size((1, 1_6) ) self.assertEqual(logits.shape , a ) lowercase__ : Tuple = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def a_ ( _lowerCAmelCase : Any ): for param in module.parameters(): lowercase__ : Optional[Any] = False def a_ ( ): lowercase__ : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu""" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): lowercase__ : Union[str, Any] = """mps""" if device == "mps": print( 'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch' ' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues' ' with generations.' ) return device def a_ ( _lowerCAmelCase : Union[str, Any] ): lowercase__ : Union[str, Any] = plt.imshow(UpperCAmelCase__ ) fig.axes.get_xaxis().set_visible(UpperCAmelCase__ ) fig.axes.get_yaxis().set_visible(UpperCAmelCase__ ) plt.show() def a_ ( ): lowercase__ : Dict = datetime.now() lowercase__ : Optional[Any] = current_time.strftime('%H:%M:%S' ) return timestamp
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCAmelCase_ : @staticmethod def _UpperCAmelCase ( *a , **a ) -> int: pass def a_ ( _lowerCAmelCase : Image ): '''simple docstring''' lowercase__ : List[str] = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCAmelCase_ ( unittest.TestCase): lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def _UpperCAmelCase ( self , a , a , a ) -> Dict: lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , a , a ) -> Optional[int]: lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' ) self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a ) import datasets lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) lowercase__ : List[Any] = depth_estimator( [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] ) self.assertEqual( [ {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, ] , a , ) @require_tf @unittest.skip('Depth estimation is not implemented in TF' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Tuple = 'Intel/dpt-large' lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a ) lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' ) lowercase__ : Optional[Any] = hashimage(outputs['depth'] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 ) @require_torch def _UpperCAmelCase ( self ) -> Optional[int]: # This is highly irregular to have no small tests. self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase : Optional[int] = { '''configuration_longformer''': [ '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongformerConfig''', '''LongformerOnnxConfig''', ], '''tokenization_longformer''': ['''LongformerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : List[str] = ['''LongformerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Any = [ '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongformerForMaskedLM''', '''LongformerForMultipleChoice''', '''LongformerForQuestionAnswering''', '''LongformerForSequenceClassification''', '''LongformerForTokenClassification''', '''LongformerModel''', '''LongformerPreTrainedModel''', '''LongformerSelfAttention''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : int = [ '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFLongformerForMaskedLM''', '''TFLongformerForMultipleChoice''', '''TFLongformerForQuestionAnswering''', '''TFLongformerForSequenceClassification''', '''TFLongformerForTokenClassification''', '''TFLongformerModel''', '''TFLongformerPreTrainedModel''', '''TFLongformerSelfAttention''', ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys _UpperCamelCase : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class UpperCAmelCase_ ( _a): def __init__( self ) -> Any: lowercase__ : Tuple = [] def _UpperCAmelCase ( self , a , a , a , **a ) -> Any: self.events.append('on_init_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]: self.events.append('on_train_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]: self.events.append('on_train_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_epoch_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]: self.events.append('on_epoch_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_step_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> str: self.events.append('on_step_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_evaluate' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple: self.events.append('on_predict' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]: self.events.append('on_save' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]: self.events.append('on_log' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Any: self.events.append('on_prediction_step' ) @require_torch class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> str: lowercase__ : str = tempfile.mkdtemp() def _UpperCAmelCase ( self ) -> Dict: shutil.rmtree(self.output_dir ) def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
lowercase__ : str = RegressionDataset(length=a ) lowercase__ : Any = RegressionDataset(length=a ) lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a ) lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a ) lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a ) return Trainer( a , a , train_dataset=a , eval_dataset=a , callbacks=a , ) def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]: self.assertEqual(len(a ) , len(a ) ) # Order doesn't matter lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ ) lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ ) for cba, cba in zip(a , a ): if isinstance(a , a ) and isinstance(a , a ): self.assertEqual(a , a ) elif isinstance(a , a ) and not isinstance(a , a ): self.assertEqual(a , cba.__class__ ) elif not isinstance(a , a ) and isinstance(a , a ): self.assertEqual(cba.__class__ , a ) else: self.assertEqual(a , a ) def _UpperCAmelCase ( self , a ) -> Optional[Any]: lowercase__ : Dict = ['on_init_end', 'on_train_begin'] lowercase__ : List[Any] = 0 lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() ) lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs ): expected_events.append('on_epoch_begin' ) for _ in range(a ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save' ) expected_events.append('on_epoch_end' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : int = self.get_trainer() lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # Callbacks passed at init are added to the default callbacks lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a ) lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase__ : List[str] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(a ) expected_callbacks.remove(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) lowercase__ : Optional[Any] = self.get_trainer() lowercase__ : List[Any] = trainer.pop_callback(a ) self.assertEqual(cb.__class__ , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) trainer.add_callback(a ) expected_callbacks.insert(0 , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # We can also add, pop, or remove by instance lowercase__ : int = self.get_trainer() 
lowercase__ : List[str] = trainer.callback_handler.callbacks[0] trainer.remove_callback(a ) expected_callbacks.remove(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) lowercase__ : Tuple = self.get_trainer() lowercase__ : Dict = trainer.callback_handler.callbacks[0] lowercase__ : Union[str, Any] = trainer.pop_callback(a ) self.assertEqual(a , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) trainer.add_callback(a ) expected_callbacks.insert(0 , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) def _UpperCAmelCase ( self ) -> Tuple: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' , category=a ) lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() lowercase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # Independent log/save/eval lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() lowercase__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' ) trainer.train() lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' ) trainer.train() lowercase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # A bit of everything lowercase__ : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , ) trainer.train() lowercase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning' ) as warn_mock: lowercase__ : str = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(a ) in warn_mock.call_args[0][0]
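# A minimal custom callback (hedged) of the kind exercised above; the hook
# signature mirrors `transformers.TrainerCallback`.
#
#   from transformers import TrainerCallback
#
#   class LogEveryStep(TrainerCallback):
#       def on_step_end(self, args, state, control, **kwargs):
#           print(f"step {state.global_step}")
#
#   # Passed at init, like MyTestTrainerCallback in the tests above:
#   # trainer = Trainer(model=..., args=..., callbacks=[LogEveryStep()])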
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _UpperCamelCase : Optional[int] ={ '''configuration_instructblip''': [ '''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''InstructBlipConfig''', '''InstructBlipQFormerConfig''', '''InstructBlipVisionConfig''', ], '''processing_instructblip''': ['''InstructBlipProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : List[Any] =[ '''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''', '''InstructBlipQFormerModel''', '''InstructBlipPreTrainedModel''', '''InstructBlipForConditionalGeneration''', '''InstructBlipVisionModel''', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys _UpperCamelCase : Tuple =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _UpperCamelCase : str = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Tuple = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Dict = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys _UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py _UpperCamelCase : List[Any] = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. _UpperCamelCase : Any = direct_transformers_import(PATH_TO_TRANSFORMERS) _UpperCamelCase : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING _UpperCamelCase : Union[str, Any] = { # used to compute the property `self.chunk_length` "EncodecConfig": ["overlap"], # used as `self.bert_model = BertModel(config, ...)` "DPRConfig": True, # not used in modeling files, but it's an important information "FSMTConfig": ["langs"], # used internally in the configuration class file "GPTNeoConfig": ["attention_types"], # used internally in the configuration class file "EsmConfig": ["is_folding_model"], # used during training (despite we don't have training script for these models yet) "Mask2FormerConfig": ["ignore_value"], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) "OneFormerConfig": ["ignore_value", "norm"], # used during preprocessing and collation, see `collating_graphormer.py` "GraphormerConfig": ["spatial_pos_max"], # used internally in the configuration class file "T5Config": ["feed_forward_proj"], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally "MT5Config": ["feed_forward_proj", "tokenizer_class"], "UMT5Config": ["feed_forward_proj", "tokenizer_class"], # used internally in the configuration class file "LongT5Config": ["feed_forward_proj"], # used internally in the configuration class file "SwitchTransformersConfig": ["feed_forward_proj"], # having default values other than `1e-5` - we can't fix them without breaking "BioGptConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "GLPNConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "SegformerConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "CvtConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "PerceiverConfig": ["layer_norm_eps"], # used internally to calculate the feature size "InformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "AutoformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate `mlp_dim` "SamVisionConfig": ["mlp_ratio"], # For (head) training, but so far not implemented "ClapAudioConfig": ["num_classes"], # Not used, but providing useful information to users "SpeechT5HifiGanConfig": ["sampling_rate"], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { "CLIPSegConfig": True, "DeformableDetrConfig": True, "DetaConfig": True, "DinatConfig": True, "DonutSwinConfig": True, "EfficientFormerConfig": True, "FSMTConfig": True, 
"JukeboxConfig": True, "LayoutLMv2Config": True, "MaskFormerSwinConfig": True, "MT5Config": True, "NatConfig": True, "OneFormerConfig": True, "PerceiverConfig": True, "RagConfig": True, "SpeechT5Config": True, "SwinConfig": True, "Swin2SRConfig": True, "Swinv2Config": True, "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, "WhisperConfig": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) "JukeboxPriorConfig": True, # TODO: @Younes (for `is_decoder`) "Pix2StructTextConfig": True, } ) def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : int = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( f"""config.{attribute}""" in modeling_source or f"""getattr(config, \"{attribute}\"""" in modeling_source or f"""getattr(self.config, \"{attribute}\"""" in modeling_source ): lowercase__ : Optional[Any] = True # Deal with multi-line cases elif ( re.search( Rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , _snake_case , ) is not None ): lowercase__ : Any = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: lowercase__ : List[str] = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files lowercase__ : str = [ 'bos_index', 'eos_index', 'pad_index', 'unk_index', 'mask_index', 'image_size', 'use_cache', 'out_features', 'out_indices', ] lowercase__ : Dict = ['encoder_no_repeat_ngram_size'] # Special cases to be allowed lowercase__ : Any = True if not attribute_used: lowercase__ : str = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: lowercase__ : Dict = True elif attribute in ["tie_word_embeddings"] and default_value is False: lowercase__ : Dict = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: lowercase__ : List[str] = True elif attribute.endswith('_token_id' ): lowercase__ : int = True # configuration class specific cases if not case_allowed: lowercase__ : Optional[int] = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) lowercase__ : Tuple = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def a_ ( _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : Optional[int] = dict(inspect.signature(config_class.__init__ ).parameters ) lowercase__ : Dict = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']] lowercase__ : Any = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass lowercase__ : Dict = {} if len(config_class.attribute_map ) > 0: lowercase__ : Optional[Any] = {v: k for k, v in 
config_class.attribute_map.items()} # Get the path to modeling source files lowercase__ : str = inspect.getsourcefile(_snake_case ) lowercase__ : Tuple = os.path.dirname(_snake_case ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. lowercase__ : str = [os.path.join(_snake_case , _snake_case ) for fn in os.listdir(_snake_case ) if fn.startswith('modeling_' )] # Get the source code strings lowercase__ : int = [] for path in modeling_paths: if os.path.isfile(_snake_case ): with open(_snake_case ) as fp: modeling_sources.append(fp.read() ) lowercase__ : List[Any] = [] for config_param, default_value in zip(_snake_case , _snake_case ): # `attributes` here is all the variant names for `config_param` lowercase__ : List[str] = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_snake_case , _snake_case , _snake_case , _snake_case ): unused_attributes.append(attributes[0] ) return sorted(_snake_case ) def a_ ( ): '''simple docstring''' lowercase__ : List[Any] = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) lowercase__ : List[Any] = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda _lowerCAmelCase : inspect.isclass(_snake_case ) and issubclass(_snake_case , _snake_case ) and inspect.getmodule(_snake_case ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: lowercase__ : Any = check_config_attributes_being_used(_snake_case ) if len(_snake_case ) > 0: lowercase__ : List[Any] = unused_attributes if len(_snake_case ) > 0: lowercase__ : Optional[int] = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n' for name, attributes in configs_with_unused_attributes.items(): error += f"""{name}: {attributes}\n""" raise ValueError(_snake_case ) if __name__ == "__main__": check_config_attributes()
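# --- Illustrative sketch (separate from the script above) of the pattern match
# that the multi-line `getattr` branch relies on, applied to a toy source string.
import re

source = 'self.rotary_dim = getattr(config, "rotary_dim", None)'
attribute = "rotary_dim"
pattern = rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\""
print(re.search(pattern, source) is not None)  # True -> the attribute counts as used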
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self , a ) -> str: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): lowercase__ : str = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = 'sshleifer/tiny-gpt2' lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , ) lowercase__ : str = TensorFlowBenchmark(a ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> int: lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Any = 'sshleifer/tiny-gpt2' lowercase__ : List[Any] = AutoConfig.from_pretrained(a ) lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , ) lowercase__ : Tuple = TensorFlowBenchmark(a , [config] ) lowercase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : List[str] = AutoConfig.from_pretrained(a ) lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : List[str] = TensorFlowBenchmark(a , [config] ) lowercase__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : Tuple = benchmark.run() 
self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : Optional[int] = AutoConfig.from_pretrained(a ) lowercase__ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : str = TensorFlowBenchmark(a , [config] ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random' lowercase__ : Any = AutoConfig.from_pretrained(a ) lowercase__ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : int = TensorFlowBenchmark(a , configs=[config] ) lowercase__ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : Any = 'sshleifer/tiny-gpt2' lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , ) lowercase__ : Any = TensorFlowBenchmark(a ) lowercase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Any = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , ) lowercase__ : Union[str, Any] = TensorFlowBenchmark(a ) benchmark.run() self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Tuple = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(a ): self.assertTrue(hasattr(a , 'sequential' ) ) self.assertTrue(hasattr(a , 'cumulative' ) ) self.assertTrue(hasattr(a , 'current' ) ) self.assertTrue(hasattr(a , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , ) lowercase__ : Optional[int] = TensorFlowBenchmark(a ) lowercase__ : Optional[Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
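# --- Illustrative sketch (outside the test class above): running the TensorFlow
# benchmark utilities directly. The tiny model and small sizes mirror the tests
# and keep the run cheap.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
print(results.time_inference_result)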
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig from transformers.utils import logging logging.set_verbosity_info() _UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' if "resnet-50" in model_name: lowercase__ : str = ResNetConfig.from_pretrained('microsoft/resnet-50' ) elif "resnet-101" in model_name: lowercase__ : str = ResNetConfig.from_pretrained('microsoft/resnet-101' ) else: raise ValueError('Model name should include either resnet50 or resnet101' ) lowercase__ : int = DetrConfig(use_timm_backbone=_lowercase , backbone_config=_lowercase ) # set label attributes lowercase__ : Dict = '''panoptic''' in model_name if is_panoptic: lowercase__ : str = 250 else: lowercase__ : Any = 91 lowercase__ : Optional[int] = '''huggingface/label-files''' lowercase__ : Dict = '''coco-detection-id2label.json''' lowercase__ : Tuple = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='dataset' ) , 'r' ) ) lowercase__ : int = {int(_lowercase ): v for k, v in idalabel.items()} lowercase__ : str = idalabel lowercase__ : Optional[int] = {v: k for k, v in idalabel.items()} return config, is_panoptic def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : List[Any] = [] # stem # fmt: off rename_keys.append(('backbone.0.body.conv1.weight', 'backbone.conv_encoder.model.embedder.embedder.convolution.weight') ) rename_keys.append(('backbone.0.body.bn1.weight', 'backbone.conv_encoder.model.embedder.embedder.normalization.weight') ) rename_keys.append(('backbone.0.body.bn1.bias', 'backbone.conv_encoder.model.embedder.embedder.normalization.bias') ) rename_keys.append(('backbone.0.body.bn1.running_mean', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_mean') ) rename_keys.append(('backbone.0.body.bn1.running_var', 'backbone.conv_encoder.model.embedder.embedder.normalization.running_var') ) # stages for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): # shortcut if layer_idx == 0: rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""", ) ) # 3 convs for i in range(3 ): rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""", 
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""", ) ) rename_keys.append( ( f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""", f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""", ) ) # fmt: on for i in range(config.encoder_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( ( f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") ) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( ( f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""", ) ) rename_keys.append( (f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", f"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", f"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.weight""", 
f"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append( (f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('input_proj.weight', 'input_projection.weight'), ('input_proj.bias', 'input_projection.bias'), ('query_embed.weight', 'query_position_embeddings.weight'), ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'), ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'), ('class_embed.weight', 'class_labels_classifier.weight'), ('class_embed.bias', 'class_labels_classifier.bias'), ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'), ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'), ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'), ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'), ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'), ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'), ] ) return rename_keys def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : int = state_dict.pop(_lowercase ) lowercase__ : List[str] = val def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : str=False ): '''simple docstring''' lowercase__ : Tuple = '''''' if is_panoptic: lowercase__ : List[Any] = '''detr.''' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowercase__ : int = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) lowercase__ : List[str] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase__ : Any = in_proj_weight[:256, :] lowercase__ : int = in_proj_bias[:256] lowercase__ : Any = in_proj_weight[256:512, :] lowercase__ : Union[str, Any] = in_proj_bias[256:512] lowercase__ : int = in_proj_weight[-256:, :] lowercase__ : int = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention lowercase__ : Optional[int] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) lowercase__ : str = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict lowercase__ : Optional[int] = in_proj_weight[:256, :] lowercase__ : Tuple = in_proj_bias[:256] lowercase__ : Union[str, Any] = in_proj_weight[256:512, :] lowercase__ : int = in_proj_bias[256:512] lowercase__ : List[str] = in_proj_weight[-256:, :] lowercase__ : Optional[Any] = in_proj_bias[-256:] # read in weights + bias of input projection layer of 
cross-attention lowercase__ : Dict = state_dict.pop( f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" ) lowercase__ : Any = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) of cross-attention to the state dict lowercase__ : Union[str, Any] = in_proj_weight_cross_attn[:256, :] lowercase__ : int = in_proj_bias_cross_attn[:256] lowercase__ : Tuple = in_proj_weight_cross_attn[256:512, :] lowercase__ : List[Any] = in_proj_bias_cross_attn[256:512] lowercase__ : Union[str, Any] = in_proj_weight_cross_attn[-256:, :] lowercase__ : Any = in_proj_bias_cross_attn[-256:] def a_ ( ): '''simple docstring''' lowercase__ : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowercase__ : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im @torch.no_grad() def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Optional[int]=False ): '''simple docstring''' lowercase__ : str = get_detr_config(_lowercase ) # load original model from torch hub lowercase__ : Optional[int] = { '''detr-resnet-50''': '''detr_resnet50''', '''detr-resnet-101''': '''detr_resnet101''', } logger.info(f"""Converting model {model_name}...""" ) lowercase__ : str = torch.hub.load('facebookresearch/detr' , model_name_to_original_name[model_name] , pretrained=_lowercase ).eval() lowercase__ : int = detr.state_dict() # rename keys for src, dest in create_rename_keys(_lowercase ): if is_panoptic: lowercase__ : List[str] = '''detr.''' + src rename_key(_lowercase , _lowercase , _lowercase ) # query, key and value matrices need special treatment read_in_q_k_v(_lowercase , is_panoptic=_lowercase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowercase__ : Optional[Any] = '''detr.model.''' if is_panoptic else '''model.''' for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith('detr' ) and not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ) ): lowercase__ : str = state_dict.pop(_lowercase ) lowercase__ : Any = val elif "class_labels_classifier" in key or "bbox_predictor" in key: lowercase__ : Optional[Any] = state_dict.pop(_lowercase ) lowercase__ : Optional[int] = val elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ): continue else: lowercase__ : Optional[Any] = state_dict.pop(_lowercase ) lowercase__ : Union[str, Any] = val else: if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): lowercase__ : Optional[Any] = state_dict.pop(_lowercase ) lowercase__ : List[str] = val # finally, create HuggingFace model and load state dict lowercase__ : List[str] = DetrForSegmentation(_lowercase ) if is_panoptic else DetrForObjectDetection(_lowercase ) model.load_state_dict(_lowercase ) model.eval() # verify our conversion on an image lowercase__ : List[Any] = '''coco_panoptic''' if is_panoptic else '''coco_detection''' lowercase__ : List[str] = DetrImageProcessor(format=_lowercase ) lowercase__ : List[str] = processor(images=prepare_img() , return_tensors='pt' ) lowercase__ : Optional[int] = encoding['''pixel_values'''] lowercase__ : List[str] = detr(_lowercase ) lowercase__ : List[Any] = model(_lowercase ) assert torch.allclose(outputs.logits , original_outputs['pred_logits'] , atol=1E-3 ) assert torch.allclose(outputs.pred_boxes , original_outputs['pred_boxes'] , 
atol=1E-3 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs['pred_masks'] , atol=1E-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) model.save_pretrained(_lowercase ) processor.save_pretrained(_lowercase ) if push_to_hub: # Upload model and image processor to the hub logger.info('Uploading PyTorch model and image processor to the hub...' ) model.push_to_hub(f"""nielsr/{model_name}""" ) processor.push_to_hub(f"""nielsr/{model_name}""" ) if __name__ == "__main__": _UpperCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument( "--model_name", default="detr-resnet-50", type=str, choices=["detr-resnet-50", "detr-resnet-101"], help="Name of the DETR model you\'d like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.") _UpperCamelCase : List[str] = parser.parse_args() convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
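# --- Illustrative invocation sketch for the conversion script above. The file
# name is an assumption (in the transformers repo it lives under
# src/transformers/models/detr/); --push_to_hub is optional.
#
#   python convert_detr_to_pytorch.py \
#       --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-converted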
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class UpperCAmelCase_ ( _a): def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any: lowercase__ : Tuple = parent lowercase__ : List[Any] = batch_size lowercase__ : List[Any] = seq_length lowercase__ : List[Any] = is_training lowercase__ : Optional[Any] = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : int = use_labels lowercase__ : Tuple = vocab_size lowercase__ : int = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : Optional[Any] = intermediate_size lowercase__ : Optional[Any] = hidden_act lowercase__ : List[str] = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : List[Any] = max_position_embeddings lowercase__ : List[str] = type_vocab_size lowercase__ : Tuple = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = num_labels lowercase__ : Tuple = num_choices lowercase__ : str = scope def _UpperCAmelCase ( self ) -> Any: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_input_mask: lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Dict = None lowercase__ : Optional[Any] = None lowercase__ : int = None if self.use_labels: lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> Optional[int]: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict: lowercase__ : Tuple = DistilBertModel(config=a ) model.to(a ) model.eval() lowercase__ : Any = model(a , a ) lowercase__ : str = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict: lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a ) model.to(a ) model.eval() lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int: lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model( a , attention_mask=a , start_positions=a , end_positions=a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]: lowercase__ : int = self.num_labels lowercase__ : Dict = DistilBertForSequenceClassification(a ) model.to(a ) model.eval() lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any: lowercase__ : Any = self.num_labels lowercase__ : List[str] = DistilBertForTokenClassification(config=a ) model.to(a ) model.eval() lowercase__ : Any = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple: lowercase__ : List[Any] = self.num_choices lowercase__ : Any = DistilBertForMultipleChoice(config=a ) model.to(a ) model.eval() lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int = model( a , attention_mask=a , labels=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : List[str] = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ : str = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Any = True lowerCamelCase__ : List[Any] = True lowerCamelCase__ : Optional[Any] = True def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : str = DistilBertModelTester(self ) lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 ) def _UpperCAmelCase ( self ) -> Dict: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : 
str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = DistilBertModel.from_pretrained(a ) self.assertIsNotNone(a ) @slow @require_torch_gpu def _UpperCAmelCase ( self ) -> Any: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return lowercase__ : Optional[int] = True lowercase__ : Union[str, Any] = model_class(config=a ) lowercase__ : int = self._prepare_for_class(a , a ) lowercase__ : Tuple = torch.jit.trace( a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) ) lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a ) loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) ) @require_torch class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' ) lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase__ : Optional[Any] = model(a , attention_mask=a )[0] lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , a ) lowercase__ : List[Any] = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
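# --- Illustrative sketch mirroring the integration test above: a raw forward
# pass through the pretrained DistilBERT base model.
import torch
from transformers import DistilBertModel, DistilBertTokenizer

tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
distilbert = DistilBertModel.from_pretrained("distilbert-base-uncased")

inputs = tokenizer("Hello, DistilBERT!", return_tensors="pt")
with torch.no_grad():
    last_hidden_state = distilbert(**inputs).last_hidden_state
print(last_hidden_state.shape)  # (batch_size, sequence_length, 768)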
"""simple docstring""" import argparse import os import re import packaging.version _UpperCamelCase : Union[str, Any] = "examples/" _UpperCamelCase : Any = { "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"), "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"), "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","), "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"), } _UpperCamelCase : List[Any] = { "init": "src/transformers/__init__.py", "setup": "setup.py", } _UpperCamelCase : Tuple = "README.md" def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Dict ): '''simple docstring''' with open(__UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowercase__ : List[Any] = f.read() lowercase__ , lowercase__ : int = REPLACE_PATTERNS[pattern] lowercase__ : Optional[int] = replace.replace('VERSION' , __UpperCamelCase ) lowercase__ : Tuple = re_pattern.sub(__UpperCamelCase , __UpperCamelCase ) with open(__UpperCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.write(__UpperCamelCase ) def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' for folder, directories, fnames in os.walk(__UpperCamelCase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('research_projects' ) if "legacy" in directories: directories.remove('legacy' ) for fname in fnames: if fname.endswith('.py' ): update_version_in_file(os.path.join(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , pattern='examples' ) def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict=False ): '''simple docstring''' for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if not patch: update_version_in_examples(__UpperCamelCase ) def a_ ( ): '''simple docstring''' lowercase__ : int = '🤗 Transformers currently provides the following architectures' lowercase__ : Tuple = '1. Want to contribute a new model?' with open(__UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f: lowercase__ : List[Any] = f.readlines() # Find the start of the list. lowercase__ : Optional[int] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 lowercase__ : Optional[Any] = start_index # Update the lines in the model list. while not lines[index].startswith(_end_prompt ): if lines[index].startswith('1.' ): lowercase__ : Optional[int] = lines[index].replace( 'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , ) index += 1 with open(__UpperCamelCase , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(__UpperCamelCase ) def a_ ( ): '''simple docstring''' with open(REPLACE_FILES['init'] , 'r' ) as f: lowercase__ : Tuple = f.read() lowercase__ : Optional[int] = REPLACE_PATTERNS['init'][0].search(__UpperCamelCase ).groups()[0] return packaging.version.parse(__UpperCamelCase ) def a_ ( _lowerCAmelCase : Union[str, Any]=False ): '''simple docstring''' lowercase__ : int = get_version() if patch and default_version.is_devrelease: raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' 
) if default_version.is_devrelease: lowercase__ : Union[str, Any] = default_version.base_version elif patch: lowercase__ : List[str] = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: lowercase__ : Dict = f"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. lowercase__ : Tuple = input(f"""Which version are you releasing? [{default_version}]""" ) if len(__UpperCamelCase ) == 0: lowercase__ : int = default_version print(f"""Updating version to {version}.""" ) global_version_update(__UpperCamelCase , patch=__UpperCamelCase ) if not patch: print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() def a_ ( ): '''simple docstring''' lowercase__ : Union[str, Any] = get_version() lowercase__ : Union[str, Any] = f"""{current_version.major}.{current_version.minor + 1}.0.dev0""" lowercase__ : Tuple = current_version.base_version # Check with the user we got that right. lowercase__ : Optional[int] = input(f"""Which version are we developing now? [{dev_version}]""" ) if len(__UpperCamelCase ) == 0: lowercase__ : List[Any] = dev_version print(f"""Updating version to {version}.""" ) global_version_update(__UpperCamelCase ) print('Cleaning main README, don\'t forget to run `make fix-copies`.' ) clean_main_ref_in_model_list() if __name__ == "__main__": _UpperCamelCase : Dict = argparse.ArgumentParser() parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.") parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.") _UpperCamelCase : Union[str, Any] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("Nothing to do after a patch :-)") else: post_release_work()
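# --- Illustrative invocation sketch for the release helper above (the file
# name utils/release.py is an assumption based on the transformers repo layout):
#
#   python utils/release.py                 # prepare a minor release
#   python utils/release.py --patch         # prepare a patch release
#   python utils/release.py --post_release  # bump main back to a .dev0 version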
"""simple docstring""" from __future__ import annotations def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ): '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif stress < 0: raise ValueError('Stress cannot be negative' ) elif tangential_force < 0: raise ValueError('Tangential Force cannot be negative' ) elif area < 0: raise ValueError('Area cannot be negative' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _UpperCamelCase : Any = logging.get_logger(__name__) _UpperCamelCase : Any = { "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json", } class UpperCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__): lowerCamelCase__ : Optional[int] = 'convnextv2' def __init__( self , a=3 , a=4 , a=4 , a=None , a=None , a="gelu" , a=0.02 , a=1e-12 , a=0.0 , a=2_2_4 , a=None , a=None , **a , ) -> Optional[int]: super().__init__(**a ) lowercase__ : Dict = num_channels lowercase__ : Optional[Any] = patch_size lowercase__ : str = num_stages lowercase__ : List[Any] = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes lowercase__ : str = [3, 3, 9, 3] if depths is None else depths lowercase__ : int = hidden_act lowercase__ : int = initializer_range lowercase__ : str = layer_norm_eps lowercase__ : Optional[int] = drop_path_rate lowercase__ : Dict = image_size lowercase__ : List[str] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] lowercase__ : str = get_aligned_output_features_output_indices( out_features=a , out_indices=a , stage_names=self.stage_names )
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any: lowercase__ : List[str] = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[int] = image_size lowercase__ : List[Any] = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : str = is_training lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[Any] = hidden_size lowercase__ : Dict = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[Any] = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Any = type_sequence_label_size lowercase__ : Dict = initializer_range lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Tuple = n_targets lowercase__ : Optional[int] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens def _UpperCAmelCase ( self ) -> Any: lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowercase__ : Tuple = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowercase__ : int = [] for i in range(self.batch_size ): lowercase__ : Optional[Any] = {} lowercase__ : Any = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=a ) lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a ) labels.append(a ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> List[Any]: return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def _UpperCAmelCase ( self , a , a , a ) -> int: lowercase__ : List[str] = YolosModel(config=a ) model.to(a ) model.eval() lowercase__ : List[Any] = model(a ) self.parent.assertEqual( result.last_hidden_state.shape , 
(self.batch_size, self.expected_seq_len, self.hidden_size) ) def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]: lowercase__ : str = YolosForObjectDetection(a ) model.to(a ) model.eval() lowercase__ : Dict = model(pixel_values=a ) lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowercase__ : str = model(pixel_values=a , labels=a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs lowercase__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCamelCase__ : List[str] = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) lowerCamelCase__ : List[Any] = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Tuple = False lowerCamelCase__ : Union[str, Any] = False def _UpperCAmelCase ( self , a , a , a=False ) -> Dict: lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowercase__ : Optional[Any] = [] for i in range(self.model_tester.batch_size ): lowercase__ : Dict = {} lowercase__ : Dict = torch.ones( size=(self.model_tester.n_targets,) , device=a , dtype=torch.long ) lowercase__ : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=a , dtype=torch.float ) labels.append(a ) lowercase__ : Union[str, Any] = labels return inputs_dict def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Dict = YolosModelTester(self ) lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: # YOLOS does not use inputs_embeds pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) lowercase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Tuple = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( 
self ) -> Dict: lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Dict = True # in YOLOS, the seq_len is different lowercase__ : Tuple = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : str = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : Optional[int] = True lowercase__ : List[Any] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[str] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : Dict = len(a ) # Check attention is always last and order is fine lowercase__ : Any = True lowercase__ : int = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = 1 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowercase__ : Tuple = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> List[str]: def check_hidden_states_output(a , a , a ): lowercase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(a , a ) ) lowercase__ : int = outputs.hidden_states lowercase__ : Any = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(a ) , a ) # YOLOS has a different seq_length lowercase__ : Optional[int] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[Any] = True check_hidden_states_output(a , a , a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*a ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : int = YolosModel.from_pretrained(a ) self.assertIsNotNone(a ) def a_ ( ): '''simple docstring''' lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Union[str, Any]: return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = 
YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a ) lowercase__ : Tuple = self.default_image_processor lowercase__ : Optional[int] = prepare_img() lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : int = model(inputs.pixel_values ) # verify outputs lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : Any = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , ) lowercase__ : List[str] = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) ) # verify postprocessing lowercase__ : Optional[Any] = image_processor.post_process_object_detection( a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a ) lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7] lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a ) self.assertEqual(len(results['scores'] ) , 5 ) self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) ) self.assertSequenceEqual(results['labels'].tolist() , a ) self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
645
0
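The YOLOS tester above builds its expected sequence length from the patch grid plus one [CLS] token and the detection tokens. Below is a minimal sketch of that arithmetic using the tester's own default values (image_size=[30, 30], patch_size=2, num_detection_tokens=10); the snippet is illustrative and not part of the dataset row.

image_size = (30, 30)
patch_size = 2
num_detection_tokens = 10

# 15 x 15 patch grid, as computed in the tester's __init__ above
num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)  # 225
expected_seq_len = num_patches + 1 + num_detection_tokens  # 225 + 1 + 10

assert expected_seq_len == 236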
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _UpperCamelCase : Union[str, Any] = { "configuration_bridgetower": [ "BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP", "BridgeTowerConfig", "BridgeTowerTextConfig", "BridgeTowerVisionConfig", ], "processing_bridgetower": ["BridgeTowerProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : int = ["BridgeTowerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : List[str] = [ "BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST", "BridgeTowerForContrastiveLearning", "BridgeTowerForImageAndTextRetrieval", "BridgeTowerForMaskedLM", "BridgeTowerModel", "BridgeTowerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_bridgetower import ( BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP, BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig, ) from .processing_bridgetower import BridgeTowerProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_bridgetower import BridgeTowerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bridgetower import ( BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST, BridgeTowerForContrastiveLearning, BridgeTowerForImageAndTextRetrieval, BridgeTowerForMaskedLM, BridgeTowerModel, BridgeTowerPreTrainedModel, ) else: import sys _UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
715
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch _UpperCamelCase : int = logging.get_logger(__name__) @dataclass class UpperCAmelCase_ : def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple: lowercase__ : str = load_in_abit lowercase__ : str = load_in_abit lowercase__ : List[str] = llm_inta_threshold lowercase__ : Dict = llm_inta_skip_modules lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload lowercase__ : Any = llm_inta_has_fpaa_weight lowercase__ : Any = bnb_abit_quant_type lowercase__ : Dict = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: lowercase__ : Dict = torch.floataa elif isinstance(a , a ): lowercase__ : Any = getattr(a , a ) elif isinstance(a , torch.dtype ): lowercase__ : Any = bnb_abit_compute_dtype else: raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' ) self.post_init() def _UpperCAmelCase ( self ) -> str: if not isinstance(self.llm_inta_threshold , a ): raise ValueError('llm_int8_threshold must be a float' ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ): raise ValueError('llm_int8_skip_modules must be a list of strings' ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ): raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' ) if not isinstance(self.llm_inta_has_fpaa_weight , a ): raise ValueError('llm_int8_has_fp16_weight must be a boolean' ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' ) if not isinstance(self.bnb_abit_quant_type , a ): raise ValueError('bnb_4bit_quant_type must be a string' ) if not isinstance(self.bnb_abit_use_double_quant , a ): raise ValueError('bnb_4bit_use_double_quant must be a boolean' ) if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse( '0.39.0' ): raise ValueError( '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' ) def _UpperCAmelCase ( self ) -> Tuple: return self.load_in_abit or self.load_in_abit def _UpperCAmelCase ( self ) -> List[str]: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]: lowercase__ : List[Any] = cls(**a ) lowercase__ : Union[str, Any] = [] for key, value in kwargs.items(): if hasattr(a , a ): setattr(a , a , a ) to_remove.append(a ) for key in to_remove: kwargs.pop(a , a ) if 
return_unused_kwargs: return config, kwargs else: return config def _UpperCAmelCase ( self , a ) -> Dict: with open(a , 'w' , encoding='utf-8' ) as writer: lowercase__ : Any = self.to_dict() lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n' writer.write(a ) def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1] return output def __repr__( self ) -> Dict: return f"""{self.__class__.__name__} {self.to_json_string()}""" def _UpperCAmelCase ( self , a = True ) -> str: if use_diff is True: lowercase__ : List[Any] = self.to_diff_dict() else: lowercase__ : List[str] = self.to_dict() return json.dumps(a , indent=2 , sort_keys=a ) + "\n" def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Tuple = self.to_dict() # get the default config dict lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict() lowercase__ : int = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: lowercase__ : Optional[int] = value return serializable_config_dict
645
0
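The quantization config in the row above is recognizably transformers' BitsAndBytesConfig (the class even instantiates that name inside its diff-dict method). A short, hedged usage sketch follows; the keyword names are inferred from the validation messages in the class and assume the public transformers API rather than the obfuscated one shown here.

# A minimal sketch, assuming transformers>=4.30 and the public BitsAndBytesConfig API.
import torch
from transformers import BitsAndBytesConfig

config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)
# use_diff=True serializes only the fields that differ from the defaults,
# mirroring the to_diff_dict() logic in the class above.
print(config.to_json_string(use_diff=True))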
"""simple docstring""" def a_ ( _lowerCAmelCase : List[str] = "The quick brown fox jumps over the lazy dog" , ): '''simple docstring''' lowercase__ : Optional[int] = set() # Replace all the whitespace in our sentence lowercase__ : Union[str, Any] = input_str.replace(' ' , '' ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(_lowerCAmelCase ) == 26 def a_ ( _lowerCAmelCase : Dict = "The quick brown fox jumps over the lazy dog" , ): '''simple docstring''' lowercase__ : int = [False] * 26 for char in input_str: if char.islower(): lowercase__ : Tuple = True elif char.isupper(): lowercase__ : str = True return all(_lowerCAmelCase ) def a_ ( _lowerCAmelCase : Tuple = "The quick brown fox jumps over the lazy dog" , ): '''simple docstring''' return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def a_ ( ): '''simple docstring''' from timeit import timeit lowercase__ : Any = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest" print(timeit('is_pangram()' , setup=_lowerCAmelCase ) ) print(timeit('is_pangram_faster()' , setup=_lowerCAmelCase ) ) print(timeit('is_pangram_fastest()' , setup=_lowerCAmelCase ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
716
"""simple docstring""" import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _UpperCamelCase : int = 16 _UpperCamelCase : Union[str, Any] = 32 def a_ ( _lowerCAmelCase : Tuple ): '''simple docstring''' return int(x / 2**20 ) class UpperCAmelCase_ : def __enter__( self ) -> Union[str, Any]: gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowercase__ : List[str] = torch.cuda.memory_allocated() return self def __exit__( self , *a ) -> Any: gc.collect() torch.cuda.empty_cache() lowercase__ : Optional[Any] = torch.cuda.memory_allocated() lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated() lowercase__ : List[Any] = bamb(self.end - self.begin ) lowercase__ : List[Any] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ): '''simple docstring''' lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) lowercase__ : Union[str, Any] = load_dataset( 'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} ) def tokenize_function(_lowerCAmelCase : int ): # max_length=None => use the model max length (it's actually the default) lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ : Union[str, Any] = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowerCAmelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
lowercase__ : Dict = DataLoader( tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) lowercase__ : Dict = DataLoader( tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Optional[int] = config['lr'] lowercase__ : Optional[Any] = int(config['num_epochs'] ) lowercase__ : Optional[Any] = int(config['seed'] ) lowercase__ : int = int(config['batch_size'] ) lowercase__ : Union[str, Any] = args.model_name_or_path set_seed(_lowerCAmelCase ) lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase ) # Instantiate optimizer lowercase__ : List[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase ) if accelerator.state.deepspeed_plugin is not None: lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: lowercase__ : List[Any] = 1 lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , ) else: lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
    lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare(
        _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )

    # We need to keep track of how many total steps we have iterated over
    lowercase__ : Optional[int] = 0
    # We also need to keep track of the starting epoch so files are named properly
    lowercase__ : Tuple = 0

    # Now we train the model
    lowercase__ : Optional[Any] = {}
    for epoch in range(_lowerCAmelCase , _lowerCAmelCase ):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(_lowerCAmelCase ):
                lowercase__ : List[Any] = model(**_lowerCAmelCase )
                lowercase__ : Dict = outputs.loss
                lowercase__ : int = loss / gradient_accumulation_steps
                accelerator.backward(_lowerCAmelCase )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
        lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin )
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
            json.dump(_lowerCAmelCase , _lowerCAmelCase )


def a_ ( ):
    '''simple docstring'''

    lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' ,
        type=_lowerCAmelCase ,
        default='bert-base-cased' ,
        help='Path to pretrained model or model identifier from huggingface.co/models.' ,
        required=_lowerCAmelCase ,
    )
    parser.add_argument(
        '--output_dir' ,
        type=_lowerCAmelCase ,
        default='.' ,
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,
    )
    parser.add_argument(
        '--peak_memory_upper_bound' ,
        type=_lowerCAmelCase ,
        default=_lowerCAmelCase ,
        help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' ,
    )
    parser.add_argument(
        '--n_train' ,
        type=_lowerCAmelCase ,
        default=320 ,
        help='Number of training examples to use.' ,
    )
    parser.add_argument(
        '--n_val' ,
        type=_lowerCAmelCase ,
        default=160 ,
        help='Number of validation examples to use.' ,
    )
    parser.add_argument(
        '--num_epochs' ,
        type=_lowerCAmelCase ,
        default=1 ,
        help='Number of train epochs.' ,
    )
    lowercase__ : Any = parser.parse_args()
    lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(_lowerCAmelCase , _lowerCAmelCase )


if __name__ == "__main__":
    main()
645
0
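Because the obfuscation in the row above collapses all three pangram variants to the same name, here is a self-contained sketch of the set-based variant with a quick sanity check:

def is_pangram(sentence: str) -> bool:
    # A pangram uses every letter of the English alphabet at least once.
    return len({char for char in sentence.lower() if char.isalpha()}) == 26

assert is_pangram("The quick brown fox jumps over the lazy dog")
assert not is_pangram("Hello world")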
"""simple docstring""" from __future__ import annotations _UpperCamelCase : List[Any] = list[tuple[int, int]] _UpperCamelCase : str = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _UpperCamelCase : Optional[Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class UpperCAmelCase_ : def __init__( self , a , a , a , a , a , a , ) -> int: lowercase__ : Union[str, Any] = pos_x lowercase__ : Tuple = pos_y lowercase__ : str = (pos_y, pos_x) lowercase__ : Dict = goal_x lowercase__ : Any = goal_y lowercase__ : int = g_cost lowercase__ : Dict = parent lowercase__ : int = self.calculate_heuristic() def _UpperCAmelCase ( self ) -> float: lowercase__ : Tuple = abs(self.pos_x - self.goal_x ) lowercase__ : Any = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , a ) -> bool: return self.f_cost < other.f_cost class UpperCAmelCase_ : def __init__( self , a , a ) -> Optional[int]: lowercase__ : Optional[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , a ) lowercase__ : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , a ) lowercase__ : Dict = [self.start] lowercase__ : int = [] lowercase__ : List[Any] = False def _UpperCAmelCase ( self ) -> Path | None: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowercase__ : Tuple = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: lowercase__ : List[str] = True return self.retrace_path(a ) self.closed_nodes.append(a ) lowercase__ : str = self.get_successors(a ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(a ) else: # retrieve the best current path lowercase__ : Any = self.open_nodes.pop(self.open_nodes.index(a ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(a ) else: self.open_nodes.append(a ) if not self.reached: return [self.start.pos] return None def _UpperCAmelCase ( self , a ) -> list[Node]: lowercase__ : Any = [] for action in delta: lowercase__ : Optional[int] = parent.pos_x + action[1] lowercase__ : Any = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(a ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( a , a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , a , ) ) return successors def _UpperCAmelCase ( self , a ) -> Path: lowercase__ : Union[str, Any] = node lowercase__ : Dict = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowercase__ : Optional[int] = current_node.parent path.reverse() return path if __name__ == "__main__": _UpperCamelCase : Dict = (0, 0) _UpperCamelCase : Optional[int] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("------") _UpperCamelCase : List[str] = GreedyBestFirst(init, goal) _UpperCamelCase : Any = greedy_bf.search() if path: for pos_x, pos_y in path: _UpperCamelCase : str = 2 for elem in grid: print(elem)
717
"""simple docstring""" def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Any = [0] * len(_lowerCAmelCase ) for i in range(1 , len(_lowerCAmelCase ) ): # use last results for better performance - dynamic programming lowercase__ : List[str] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: lowercase__ : Dict = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 lowercase__ : Union[str, Any] = j return prefix_result def a_ ( _lowerCAmelCase : str ): '''simple docstring''' return max(prefix_function(_lowerCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod()
645
0
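A worked example of the prefix function above (re-implemented locally, since the original names are obfuscated): for each position it stores the length of the longest proper prefix that is also a suffix.

def prefix_function(s: str) -> list:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        # reuse the previous result instead of rescanning (dynamic programming)
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert max(prefix_function("aabcdaabc")) == 4  # longest border: "aabc"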
"""simple docstring""" import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint _UpperCamelCase : Union[str, Any] = { "169M": 12, "430M": 24, "1B5": 24, "3B": 32, "7B": 32, "14B": 40, } _UpperCamelCase : List[Any] = { "169M": 7_68, "430M": 10_24, "1B5": 20_48, "3B": 25_60, "7B": 40_96, "14B": 51_20, } def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : List[str] = list(state_dict.keys() ) for name in state_dict_keys: lowercase__ : Optional[int] = state_dict.pop(__lowerCAmelCase ) # emb -> embedding if name.startswith('emb.' ): lowercase__ : Tuple = name.replace('emb.' , 'embeddings.' ) # ln_0 -> pre_ln (only present at block 0) if name.startswith('blocks.0.ln0' ): lowercase__ : Optional[int] = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' ) # att -> attention lowercase__ : Union[str, Any] = re.sub(R'blocks\.(\d+)\.att' , R'blocks.\1.attention' , __lowerCAmelCase ) # ffn -> feed_forward lowercase__ : Dict = re.sub(R'blocks\.(\d+)\.ffn' , R'blocks.\1.feed_forward' , __lowerCAmelCase ) # time_mix_k -> time_mix_key and reshape if name.endswith('.time_mix_k' ): lowercase__ : int = name.replace('.time_mix_k' , '.time_mix_key' ) # time_mix_v -> time_mix_value and reshape if name.endswith('.time_mix_v' ): lowercase__ : Union[str, Any] = name.replace('.time_mix_v' , '.time_mix_value' ) # time_mix_r -> time_mix_key and reshape if name.endswith('.time_mix_r' ): lowercase__ : int = name.replace('.time_mix_r' , '.time_mix_receptance' ) if name != "head.weight": lowercase__ : List[str] = "rwkv." + name lowercase__ : Optional[Any] = weight return state_dict def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : int=False , _lowerCAmelCase : Dict=None ): '''simple docstring''' if tokenizer_file is None: print('No `--tokenizer_file` provided, we will use the default tokenizer.' ) lowercase__ : str = 5_0277 lowercase__ : Optional[Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' ) else: lowercase__ : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase ) lowercase__ : List[Any] = len(__lowerCAmelCase ) tokenizer.save_pretrained(__lowerCAmelCase ) # 2. Build the config lowercase__ : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: lowercase__ : Optional[Any] = candidate break if size is None: raise ValueError('Could not infer the size, please provide it with the `--size` argument.' ) if size not in possible_sizes: raise ValueError(f"""`size` should be one of {possible_sizes}, got {size}.""" ) lowercase__ : Any = RwkvConfig( vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(__lowerCAmelCase ) # 3. Download model file then convert state_dict lowercase__ : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase ) lowercase__ : Optional[int] = torch.load(__lowerCAmelCase , map_location='cpu' ) lowercase__ : Any = convert_state_dict(__lowerCAmelCase ) # 4. 
Split in shards and save lowercase__ : List[str] = shard_checkpoint(__lowerCAmelCase ) for shard_file, shard in shards.items(): torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) if index is not None: lowercase__ : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase ) # Save the index as well with open(__lowerCAmelCase , 'w' , encoding='utf-8' ) as f: lowercase__ : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n" f.write(__lowerCAmelCase ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( 'Cleaning up shards. This may error with an OOM error, it this is the case don\'t worry you still have converted the model.' ) lowercase__ : Union[str, Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: lowercase__ : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError('Please provide a `model_name` to push the model to the Hub.' ) lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase ) model.push_to_hub(__lowerCAmelCase , max_shard_size='2GB' ) tokenizer.push_to_hub(__lowerCAmelCase ) if __name__ == "__main__": _UpperCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint." ) parser.add_argument( "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo." ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="Where to save the converted model." ) parser.add_argument( "--tokenizer_file", default=None, type=str, help="Path to the tokenizer file to use (if not provided, only the model is converted).", ) parser.add_argument( "--size", default=None, type=str, help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.", ) parser.add_argument( "--push_to_hub", action="store_true", help="Push to the Hub the converted model.", ) parser.add_argument( "--model_name", default=None, type=str, help="Name of the pushed model on the Hub, including the username / organization.", ) _UpperCamelCase : Optional[Any] = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
718
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]: lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0} lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} lowercase__ : Optional[int] = parent lowercase__ : Optional[int] = batch_size lowercase__ : str = num_channels lowercase__ : Any = image_size lowercase__ : Optional[Any] = min_resolution lowercase__ : int = max_resolution lowercase__ : List[Any] = do_resize lowercase__ : List[str] = size lowercase__ : str = do_center_crop lowercase__ : List[Any] = crop_size lowercase__ : Union[str, Any] = do_flip_channel_order def _UpperCAmelCase ( self ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Tuple = MobileViTImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_center_crop' ) ) self.assertTrue(hasattr(a , 'center_crop' ) ) self.assertTrue(hasattr(a , 'do_flip_channel_order' ) ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 2_0} ) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} ) lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> str: # Initialize image_processing lowercase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Tuple: # Initialize image_processing lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Dict: # Initialize image_processing lowercase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
645
0
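To make the renaming rules in the RWKV conversion script above concrete, here is a small stand-alone sketch of the key-renaming step applied to two representative parameter names. It is a local re-implementation of a subset of the rules, not the script's own function.

import re

def rename(name: str) -> str:
    if name.startswith("emb."):
        name = name.replace("emb.", "embeddings.")
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    if name.endswith(".time_mix_k"):
        name = name.replace(".time_mix_k", ".time_mix_key")
    # everything except the LM head lands under the "rwkv." prefix
    if name != "head.weight":
        name = "rwkv." + name
    return name

assert rename("blocks.3.att.time_mix_k") == "rwkv.blocks.3.attention.time_mix_key"
assert rename("head.weight") == "head.weight"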
"""simple docstring""" import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py _UpperCamelCase : int = "src/transformers" # This is to make sure the transformers module imported is the one in the repo. _UpperCamelCase : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS) _UpperCamelCase : int = transformers.models.auto.configuration_auto.CONFIG_MAPPING _UpperCamelCase : List[Any] = { # used to compute the property `self.chunk_length` "EncodecConfig": ["overlap"], # used as `self.bert_model = BertModel(config, ...)` "DPRConfig": True, # not used in modeling files, but it's an important information "FSMTConfig": ["langs"], # used internally in the configuration class file "GPTNeoConfig": ["attention_types"], # used internally in the configuration class file "EsmConfig": ["is_folding_model"], # used during training (despite we don't have training script for these models yet) "Mask2FormerConfig": ["ignore_value"], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) "OneFormerConfig": ["ignore_value", "norm"], # used during preprocessing and collation, see `collating_graphormer.py` "GraphormerConfig": ["spatial_pos_max"], # used internally in the configuration class file "T5Config": ["feed_forward_proj"], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally "MT5Config": ["feed_forward_proj", "tokenizer_class"], "UMT5Config": ["feed_forward_proj", "tokenizer_class"], # used internally in the configuration class file "LongT5Config": ["feed_forward_proj"], # used internally in the configuration class file "SwitchTransformersConfig": ["feed_forward_proj"], # having default values other than `1e-5` - we can't fix them without breaking "BioGptConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "GLPNConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "SegformerConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "CvtConfig": ["layer_norm_eps"], # having default values other than `1e-5` - we can't fix them without breaking "PerceiverConfig": ["layer_norm_eps"], # used internally to calculate the feature size "InformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate the feature size "AutoformerConfig": ["num_static_real_features", "num_time_features"], # used internally to calculate `mlp_dim` "SamVisionConfig": ["mlp_ratio"], # For (head) training, but so far not implemented "ClapAudioConfig": ["num_classes"], # Not used, but providing useful information to users "SpeechT5HifiGanConfig": ["sampling_rate"], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { "CLIPSegConfig": True, "DeformableDetrConfig": True, "DetaConfig": True, "DinatConfig": True, "DonutSwinConfig": True, "EfficientFormerConfig": True, "FSMTConfig": True, 
"JukeboxConfig": True, "LayoutLMv2Config": True, "MaskFormerSwinConfig": True, "MT5Config": True, "NatConfig": True, "OneFormerConfig": True, "PerceiverConfig": True, "RagConfig": True, "SpeechT5Config": True, "SwinConfig": True, "Swin2SRConfig": True, "Swinv2Config": True, "SwitchTransformersConfig": True, "TableTransformerConfig": True, "TapasConfig": True, "TransfoXLConfig": True, "UniSpeechConfig": True, "UniSpeechSatConfig": True, "WavLMConfig": True, "WhisperConfig": True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) "JukeboxPriorConfig": True, # TODO: @Younes (for `is_decoder`) "Pix2StructTextConfig": True, } ) def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : List[Any] = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( f"""config.{attribute}""" in modeling_source or f"""getattr(config, \"{attribute}\"""" in modeling_source or f"""getattr(self.config, \"{attribute}\"""" in modeling_source ): lowercase__ : Optional[int] = True # Deal with multi-line cases elif ( re.search( Rf"""getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"""" , lowercase__ , ) is not None ): lowercase__ : List[str] = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: lowercase__ : Union[str, Any] = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files lowercase__ : str = [ 'bos_index', 'eos_index', 'pad_index', 'unk_index', 'mask_index', 'image_size', 'use_cache', 'out_features', 'out_indices', ] lowercase__ : Any = ['encoder_no_repeat_ngram_size'] # Special cases to be allowed lowercase__ : List[str] = True if not attribute_used: lowercase__ : List[str] = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: lowercase__ : Union[str, Any] = True elif attribute in ["tie_word_embeddings"] and default_value is False: lowercase__ : Dict = True # Allow cases without checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: lowercase__ : Union[str, Any] = True elif attribute.endswith('_token_id' ): lowercase__ : Optional[int] = True # configuration class specific cases if not case_allowed: lowercase__ : int = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) lowercase__ : Any = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def a_ ( _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' lowercase__ : Any = dict(inspect.signature(config_class.__init__ ).parameters ) lowercase__ : Optional[Any] = [x for x in list(signature.keys() ) if x not in ['self', 'kwargs']] lowercase__ : str = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass lowercase__ : Optional[Any] = {} if len(config_class.attribute_map ) > 0: 
lowercase__ : List[str] = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files lowercase__ : List[str] = inspect.getsourcefile(lowercase__ ) lowercase__ : Dict = os.path.dirname(lowercase__ ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. lowercase__ : str = [os.path.join(lowercase__ , lowercase__ ) for fn in os.listdir(lowercase__ ) if fn.startswith('modeling_' )] # Get the source code strings lowercase__ : Optional[Any] = [] for path in modeling_paths: if os.path.isfile(lowercase__ ): with open(lowercase__ ) as fp: modeling_sources.append(fp.read() ) lowercase__ : Dict = [] for config_param, default_value in zip(lowercase__ , lowercase__ ): # `attributes` here is all the variant names for `config_param` lowercase__ : str = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): unused_attributes.append(attributes[0] ) return sorted(lowercase__ ) def a_ ( ): '''simple docstring''' lowercase__ : Any = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) lowercase__ : Union[str, Any] = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda _lowerCAmelCase : inspect.isclass(lowercase__ ) and issubclass(lowercase__ , lowercase__ ) and inspect.getmodule(lowercase__ ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: lowercase__ : List[Any] = check_config_attributes_being_used(lowercase__ ) if len(lowercase__ ) > 0: lowercase__ : int = unused_attributes if len(lowercase__ ) > 0: lowercase__ : Union[str, Any] = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n' for name, attributes in configs_with_unused_attributes.items(): error += f"""{name}: {attributes}\n""" raise ValueError(lowercase__ ) if __name__ == "__main__": check_config_attributes()
719
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict: lowercase__ : Optional[Any] = parent lowercase__ : Dict = batch_size lowercase__ : List[Any] = seq_length lowercase__ : int = is_training lowercase__ : str = use_attention_mask lowercase__ : Dict = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : Tuple = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : int = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Dict = hidden_dropout_prob lowercase__ : Tuple = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : int = type_vocab_size lowercase__ : List[str] = type_sequence_label_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : Optional[int] = num_choices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_attention_mask: lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : List[str] = None if self.use_token_type_ids: lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : Any = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCAmelCase ( self ) -> Any: lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self ) @slow def _UpperCAmelCase ( self ) -> str: for model_class_name in self.all_model_classes: lowercase__ : str = 
model_class_name.from_pretrained('albert-base-v2' ) lowercase__ : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(a ) @require_flax class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' ) lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase__ : Any = model(a , attention_mask=a )[0] lowercase__ : Tuple = (1, 1_1, 7_6_8) self.assertEqual(output.shape , a ) lowercase__ : Optional[Any] = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
645
0
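The attribute checker above boils down to scanning modeling sources for `config.xxx` or `getattr(config, "xxx", ...)` references. A toy sketch of that core test on a one-line source string:

source = 'self.dense = nn.Linear(config.hidden_size, getattr(config, "num_labels", 2))'

def attribute_used(attribute: str, modeling_source: str) -> bool:
    # mirrors the first two substring checks in check_attribute_being_used above
    return (
        f"config.{attribute}" in modeling_source
        or f'getattr(config, "{attribute}"' in modeling_source
    )

assert attribute_used("hidden_size", source)
assert attribute_used("num_labels", source)
assert not attribute_used("vocab_size", source)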
"""simple docstring""" from __future__ import annotations import time from math import sqrt # 1 for manhattan, 0 for euclidean _UpperCamelCase : str = 0 _UpperCamelCase : Optional[int] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] _UpperCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right _UpperCamelCase : Tuple = tuple[int, int] class UpperCAmelCase_ : def __init__( self , a , a , a , a , a , a , ) -> None: lowercase__ : Optional[int] = pos_x lowercase__ : List[Any] = pos_y lowercase__ : Union[str, Any] = (pos_y, pos_x) lowercase__ : List[Any] = goal_x lowercase__ : Optional[Any] = goal_y lowercase__ : str = g_cost lowercase__ : str = parent lowercase__ : Optional[Any] = self.calculate_heuristic() lowercase__ : List[str] = self.g_cost + self.h_cost def _UpperCAmelCase ( self ) -> float: lowercase__ : List[str] = self.pos_x - self.goal_x lowercase__ : Optional[Any] = self.pos_y - self.goal_y if HEURISTIC == 1: return abs(__a ) + abs(__a ) else: return sqrt(dy**2 + dx**2 ) def __lt__( self , a ) -> bool: return self.f_cost < other.f_cost class UpperCAmelCase_ : def __init__( self , a , a ) -> Optional[Any]: lowercase__ : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __a ) lowercase__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , __a ) lowercase__ : Optional[Any] = [self.start] lowercase__ : list[Node] = [] lowercase__ : Tuple = False def _UpperCAmelCase ( self ) -> list[TPosition]: while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() lowercase__ : Tuple = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: return self.retrace_path(__a ) self.closed_nodes.append(__a ) lowercase__ : Dict = self.get_successors(__a ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(__a ) else: # retrieve the best current path lowercase__ : Any = self.open_nodes.pop(self.open_nodes.index(__a ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(__a ) else: self.open_nodes.append(__a ) return [self.start.pos] def _UpperCAmelCase ( self , a ) -> list[Node]: lowercase__ : str = [] for action in delta: lowercase__ : Union[str, Any] = parent.pos_x + action[1] lowercase__ : Optional[Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__a ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( __a , __a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __a , ) ) return successors def _UpperCAmelCase ( self , a ) -> list[TPosition]: lowercase__ : Dict = node lowercase__ : Tuple = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) lowercase__ : str = current_node.parent path.reverse() return path class UpperCAmelCase_ : def __init__( self , a , a ) -> None: lowercase__ : Dict = AStar(__a , __a ) lowercase__ : str = AStar(__a , __a ) lowercase__ : int = False def _UpperCAmelCase ( self ) -> list[TPosition]: while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes: self.fwd_astar.open_nodes.sort() self.bwd_astar.open_nodes.sort() lowercase__ : Tuple = self.fwd_astar.open_nodes.pop(0 ) lowercase__ : List[Any] = self.bwd_astar.open_nodes.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: return self.retrace_bidirectional_path( __a , 
__a ) self.fwd_astar.closed_nodes.append(__a ) self.bwd_astar.closed_nodes.append(__a ) lowercase__ : int = current_bwd_node lowercase__ : List[Any] = current_fwd_node lowercase__ : Optional[int] = { self.fwd_astar: self.fwd_astar.get_successors(__a ), self.bwd_astar: self.bwd_astar.get_successors(__a ), } for astar in [self.fwd_astar, self.bwd_astar]: for child_node in successors[astar]: if child_node in astar.closed_nodes: continue if child_node not in astar.open_nodes: astar.open_nodes.append(__a ) else: # retrieve the best current path lowercase__ : str = astar.open_nodes.pop( astar.open_nodes.index(__a ) ) if child_node.g_cost < better_node.g_cost: astar.open_nodes.append(__a ) else: astar.open_nodes.append(__a ) return [self.fwd_astar.start.pos] def _UpperCAmelCase ( self , a , a ) -> list[TPosition]: lowercase__ : str = self.fwd_astar.retrace_path(__a ) lowercase__ : Any = self.bwd_astar.retrace_path(__a ) bwd_path.pop() bwd_path.reverse() lowercase__ : Any = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] _UpperCamelCase : Optional[int] = (0, 0) _UpperCamelCase : Optional[int] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) _UpperCamelCase : Optional[int] = time.time() _UpperCamelCase : Optional[int] = AStar(init, goal) _UpperCamelCase : int = a_star.search() _UpperCamelCase : Optional[Any] = time.time() - start_time print(f'''AStar execution time = {end_time:f} seconds''') _UpperCamelCase : List[Any] = time.time() _UpperCamelCase : List[str] = BidirectionalAStar(init, goal) _UpperCamelCase : List[str] = time.time() - bd_start_time print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
720
"""simple docstring""" from collections.abc import Sequence def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) ) def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' lowercase__ : int = 0.0 for coeff in reversed(_lowerCAmelCase ): lowercase__ : List[Any] = result * x + coeff return result if __name__ == "__main__": _UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0) _UpperCamelCase : Dict = 1_0.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
645
0
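A quick cross-check of the two polynomial evaluators two rows above (local copies, because the original names are obfuscated): both should agree on p(x) = 5x^2 + 9.3x^3 + 7x^4 at x = 10.

import math

def evaluate_poly(poly, x):
    # naive evaluation: sum of coeff_i * x**i
    return sum(coeff * x**i for i, coeff in enumerate(poly))

def horner(poly, x):
    # Horner's scheme: fold from the highest coefficient down
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result

poly = (0.0, 0.0, 5.0, 9.3, 7.0)
assert math.isclose(evaluate_poly(poly, 10.0), horner(poly, 10.0))
assert math.isclose(horner(poly, 10.0), 79_800.0)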
"""simple docstring""" def a_ ( _lowerCAmelCase : list[int] , _lowerCAmelCase : list[int] ): '''simple docstring''' if not len(_UpperCamelCase ) == len(_UpperCamelCase ) == 3: raise ValueError('Please enter a valid equation.' ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError('Both a & b of two equations can\'t be zero.' ) # Extract the coefficients lowercase__ , lowercase__ , lowercase__ : Optional[int] = equationa lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = equationa # Calculate the determinants of the matrices lowercase__ : int = aa * ba - aa * ba lowercase__ : str = ca * ba - ca * ba lowercase__ : Optional[int] = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError('Infinite solutions. (Consistent system)' ) else: raise ValueError('No solution. (Inconsistent system)' ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: lowercase__ : Dict = determinant_x / determinant lowercase__ : Tuple = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
721
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _UpperCamelCase : Any = [ {"dataset": "wikipedia", "config_name": "20220301.de"}, {"dataset": "wikipedia", "config_name": "20220301.en"}, {"dataset": "wikipedia", "config_name": "20220301.fr"}, {"dataset": "wikipedia", "config_name": "20220301.frr"}, {"dataset": "wikipedia", "config_name": "20220301.it"}, {"dataset": "wikipedia", "config_name": "20220301.simple"}, {"dataset": "snli", "config_name": "plain_text"}, {"dataset": "eli5", "config_name": "LFQA_reddit"}, {"dataset": "wiki40b", "config_name": "en"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, {"dataset": "natural_questions", "config_name": "default"}, ] def a_ ( _lowerCAmelCase : Optional[Any]=True ): '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a)) class UpperCAmelCase_ ( _a): lowerCamelCase__ : str = None lowerCamelCase__ : Optional[Any] = None def _UpperCAmelCase ( self , a , a ) -> List[Any]: with TemporaryDirectory() as tmp_dir: lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a ) lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=a , config_name=a , hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a ) self.assertTrue(os.path.exists(a ) ) @pytest.mark.integration def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : Optional[int] = import_main_class(dataset_module.module_path ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam lowercase__ : Optional[int] = None builder_instance.download_and_prepare() lowercase__ : Optional[int] = builder_instance.as_dataset() assert ds @pytest.mark.integration def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , 
hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset() assert ds assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert "train" in ds assert isinstance(ds['train'] , _lowerCAmelCase ) assert next(iter(ds['train'] ) )
645
0
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy _UpperCamelCase : Optional[Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( __a): def __init__( self , a , a , a , **a ) -> Union[str, Any]: lowercase__ : Optional[Any] = feature_size lowercase__ : List[Any] = sampling_rate lowercase__ : Optional[int] = padding_value lowercase__ : Optional[Any] = kwargs.pop('padding_side' , 'right' ) lowercase__ : str = kwargs.pop('return_attention_mask' , A__ ) super().__init__(**A__ ) def _UpperCAmelCase ( self , a , a = True , a = None , a = False , a = None , a = None , a = None , ) -> BatchFeature: # If we have a list of dicts, let's convert it in a dict of lists # We do this to allow using this method as a collate_fn function in PyTorch Dataloader if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ): lowercase__ : Any = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( 'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`' f""" to this method that includes {self.model_input_names[0]}, but you provided""" f""" {list(processed_features.keys() )}""" ) lowercase__ : List[str] = processed_features[self.model_input_names[0]] lowercase__ : str = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(A__ ) == 0: if return_attention_mask: lowercase__ : Dict = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch lowercase__ : Tuple = required_input[0] if isinstance(A__ , (list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. lowercase__ : List[str] = 0 while len(required_input[index] ) == 0: index += 1 if index < len(A__ ): lowercase__ : Tuple = required_input[index][0] if return_tensors is None: if is_tf_tensor(A__ ): lowercase__ : Optional[Any] = 'tf' elif is_torch_tensor(A__ ): lowercase__ : Any = 'pt' elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ): lowercase__ : List[str] = 'np' else: raise ValueError( f"""type of {first_element} unknown: {type(A__ )}. """ 'Should be one of a python, numpy, pytorch or tensorflow object.' ) for key, value in processed_features.items(): if isinstance(value[0] , (int, float) ): lowercase__ : Optional[int] = to_numpy(A__ ) else: lowercase__ : int = [to_numpy(A__ ) for v in value] # Convert padding_strategy in PaddingStrategy lowercase__ : int = self._get_padding_strategies(padding=A__ , max_length=A__ ) lowercase__ : int = processed_features[self.model_input_names[0]] lowercase__ : Any = len(A__ ) if not all(len(A__ ) == batch_size for v in processed_features.values() ): raise ValueError('Some items in the output dictionary have a different batch size than others.' 
) lowercase__ : int = [] for i in range(A__ ): lowercase__ : Any = {k: v[i] for k, v in processed_features.items()} # truncation lowercase__ : Dict = self._truncate( A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , ) truncated_inputs.append(A__ ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length lowercase__ : Any = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) lowercase__ : Any = PaddingStrategy.MAX_LENGTH lowercase__ : Optional[int] = {} for i in range(A__ ): # padding lowercase__ : Union[str, Any] = self._pad( truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , ) for key, value in outputs.items(): if key not in batch_outputs: lowercase__ : Tuple = [] if value.dtype is np.dtype(np.floataa ): lowercase__ : Optional[int] = value.astype(np.floataa ) batch_outputs[key].append(A__ ) return BatchFeature(A__ , tensor_type=A__ ) def _UpperCAmelCase ( self , a , a = None , a = PaddingStrategy.DO_NOT_PAD , a = None , a = None , ) -> dict: lowercase__ : List[str] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: lowercase__ : Any = len(A__ ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowercase__ : Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowercase__ : Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length if return_attention_mask and "attention_mask" not in processed_features: lowercase__ : int = np.ones(len(A__ ) , dtype=np.intaa ) if needs_to_be_padded: lowercase__ : Optional[Any] = max_length - len(A__ ) if self.padding_side == "right": if return_attention_mask: lowercase__ : List[str] = np.pad( processed_features['attention_mask'] , (0, difference) ) lowercase__ : Dict = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) lowercase__ : Optional[Any] = np.pad( A__ , A__ , 'constant' , constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: lowercase__ : Optional[int] = np.pad( processed_features['attention_mask'] , (difference, 0) ) lowercase__ : Any = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) lowercase__ : Dict = np.pad( A__ , A__ , 'constant' , constant_values=self.padding_value ) else: raise ValueError('Invalid padding strategy:' + str(self.padding_side ) ) return processed_features def _UpperCAmelCase ( self , a , a = None , a = None , a = None , ) -> Union[str, Any]: if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' 
) lowercase__ : str = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowercase__ : Any = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowercase__ : Union[str, Any] = len(A__ ) > max_length if needs_to_be_truncated: lowercase__ : List[str] = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: lowercase__ : List[Any] = processed_features['attention_mask'][:max_length] return processed_features def _UpperCAmelCase ( self , a=False , a=None ) -> Union[str, Any]: # Get padding strategy if padding is not False: if padding is True: lowercase__ : List[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(A__ , A__ ): lowercase__ : Optional[Any] = PaddingStrategy(A__ ) elif isinstance(A__ , A__ ): lowercase__ : List[Any] = padding else: lowercase__ : List[Any] = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( 'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use' ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' ) return padding_strategy
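The padding method above is hard to read through the obfuscated names, so here is a self-contained numpy sketch of the right-padding scheme it implements; the function name and shapes are ours for illustration, not the library's API.

# Right-pad variable-length sequences to a common length and build the
# matching attention mask (1 = real value, 0 = padding), mirroring the
# `padding_side == "right"` branch above.
import numpy as np

def pad_right(sequences, padding_value=0.0):
    max_length = max(len(seq) for seq in sequences)
    padded, attention_masks = [], []
    for seq in sequences:
        difference = max_length - len(seq)
        padded.append(np.pad(np.asarray(seq, dtype=np.float32),
                             (0, difference), constant_values=padding_value))
        attention_masks.append(np.pad(np.ones(len(seq), dtype=np.int32),
                                      (0, difference)))
    return np.stack(padded), np.stack(attention_masks)

values, mask = pad_right([[0.1, 0.2, 0.3], [0.4]])
# values -> [[0.1, 0.2, 0.3], [0.4, 0.0, 0.0]]; mask -> [[1, 1, 1], [1, 0, 0]]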
700
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def a_ ( _lowerCAmelCase : dict ): '''simple docstring''' return (data["data"], data["target"]) def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ): '''simple docstring''' lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(_lowerCAmelCase , _lowerCAmelCase ) # Predict target for test data lowercase__ : str = xgb.predict(_lowerCAmelCase ) lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 ) return predictions def a_ ( ): '''simple docstring''' lowercase__ : Optional[Any] = fetch_california_housing() lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split( _lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 ) lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Error printing print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
645
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _UpperCamelCase : str = logging.get_logger(__name__) _UpperCamelCase : Any = { "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json", } class UpperCAmelCase_ ( _a , _a): lowerCamelCase__ : List[Any] = 'resnet' lowerCamelCase__ : List[str] = ['basic', 'bottleneck'] def __init__( self , a=3 , a=6_4 , a=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , a=[3, 4, 6, 3] , a="bottleneck" , a="relu" , a=False , a=None , a=None , **a , ) -> str: super().__init__(**a ) if layer_type not in self.layer_types: raise ValueError(f"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" ) lowercase__ : Any = num_channels lowercase__ : Optional[Any] = embedding_size lowercase__ : Optional[Any] = hidden_sizes lowercase__ : List[str] = depths lowercase__ : str = layer_type lowercase__ : Optional[Any] = hidden_act lowercase__ : int = downsample_in_first_stage lowercase__ : str = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(a ) + 1 )] lowercase__ , lowercase__ : Dict = get_aligned_output_features_output_indices( out_features=a , out_indices=a , stage_names=self.stage_names ) class UpperCAmelCase_ ( _a): lowerCamelCase__ : Optional[int] = version.parse("1.11") @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _UpperCAmelCase ( self ) -> float: return 1e-3
701
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]: lowercase__ : str = parent lowercase__ : int = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Dict = patch_size lowercase__ : Tuple = tubelet_size lowercase__ : Optional[int] = num_frames lowercase__ : Optional[int] = is_training lowercase__ : int = use_labels lowercase__ : Optional[int] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : Any = intermediate_size lowercase__ : str = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Union[str, Any] = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = mask_ratio lowercase__ : Optional[Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase__ : Optional[Any] = (image_size // patch_size) ** 2 lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase__ : str = int(mask_ratio * self.seq_length ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase__ : int = None if self.use_labels: lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Dict = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Tuple: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : Dict = VideoMAEModel(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( 
self , a , a , a ) -> Union[str, Any]: lowercase__ : str = VideoMAEForPreTraining(a ) model.to(a ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Any = torch.ones((self.num_masks,) ) lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool() lowercase__ : str = model(a , a ) # model only returns predictions for masked patches lowercase__ : str = mask.sum().item() lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowerCamelCase__ : Optional[int] = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) lowerCamelCase__ : Any = False lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : str = False def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = VideoMAEModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]: lowercase__ : Union[str, Any] = copy.deepcopy(a ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) ) lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase__ : Union[str, Any] = bool_masked_pos.to(a ) if return_labels: if model_class in [ *get_values(a ), ]: lowercase__ : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a ) return inputs_dict def _UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) lowercase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : int = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a ) self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> Optional[Any]: if not self.has_attentions: pass else: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = True for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Any = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase__ : Optional[Any] = True lowercase__ : int = False lowercase__ : Any = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Dict = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : List[str] = len(a ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : List[str] = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) ) self.assertEqual(out_len + 1 , len(a ) ) lowercase__ : int = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> Optional[int]: def check_hidden_states_output(a , a , a ): lowercase__ : Optional[int] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(a ) , a ) lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Union[str, Any] = True 
check_hidden_states_output(a , a , a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> List[Any]: pass def a_ ( ): '''simple docstring''' lowercase__ : int = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) lowercase__ : str = np.load(_lowerCAmelCase ) return list(_lowerCAmelCase ) @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Optional[Any]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to( a ) lowercase__ : str = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : Union[str, Any] = model(**a ) # verify the logits lowercase__ : str = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a ) lowercase__ : Optional[Any] = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a ) # add boolean mask, indicating which patches to mask lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) lowercase__ : str = torch.load(a ) # forward pass with torch.no_grad(): lowercase__ : List[Any] = model(**a ) # verify the logits lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] ) lowercase__ : List[str] = torch.tensor( [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a ) self.assertEqual(outputs.logits.shape , a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to( a ) with torch.no_grad(): lowercase__ : Any = model(**a ) lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
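The boolean-mask recipe that recurs throughout the tests above is easy to miss behind the obfuscated names, so here is a standalone sketch; the sizes are arbitrary, chosen only to show how one shared mask (1 = masked patch) is built and broadcast across the batch.

import torch

seq_length, num_masks, batch_size = 8, 3, 2
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()
print(bool_masked_pos.shape)   # torch.Size([2, 8]); first 3 columns are True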
645
0
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # and perform gradient accumulation # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _UpperCamelCase : Tuple = 16 _UpperCamelCase : List[Any] = 32 def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Tuple = 16 ): '''simple docstring''' lowercase__ : Tuple = AutoTokenizer.from_pretrained('bert-base-cased' ) lowercase__ : List[Any] = load_dataset('glue' , 'mrpc' ) def tokenize_function(_lowerCAmelCase : List[Any] ): # max_length=None => use the model max length (it's actually the default) lowercase__ : Tuple = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=lowercase__ , max_length=lowercase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ : List[Any] = datasets.map( lowercase__ , batched=lowercase__ , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : List[str] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowerCAmelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ : List[Any] = 16 elif accelerator.mixed_precision != "no": lowercase__ : Union[str, Any] = 8 else: lowercase__ : Optional[Any] = None return tokenizer.pad( lowercase__ , padding='longest' , max_length=lowercase__ , pad_to_multiple_of=lowercase__ , return_tensors='pt' , ) # Instantiate dataloaders. 
lowercase__ : Dict = DataLoader( tokenized_datasets['train'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) lowercase__ : int = DataLoader( tokenized_datasets['validation'] , shuffle=lowercase__ , collate_fn=lowercase__ , batch_size=lowercase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1": from accelerate.test_utils.training import mocked_dataloaders _UpperCamelCase : int = mocked_dataloaders # noqa: F811 def a_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' if os.environ.get('TESTING_MOCKED_DATALOADERS' , lowercase__ ) == "1": lowercase__ : Dict = 2 # New Code # lowercase__ : Any = int(args.gradient_accumulation_steps ) # Initialize accelerator lowercase__ : Any = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase__ ) if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1: raise NotImplementedError( 'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`' ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Optional[int] = config['lr'] lowercase__ : Optional[Any] = int(config['num_epochs'] ) lowercase__ : List[str] = int(config['seed'] ) lowercase__ : Union[str, Any] = int(config['batch_size'] ) lowercase__ : List[str] = evaluate.load('glue' , 'mrpc' ) set_seed(lowercase__ ) lowercase__ , lowercase__ : Union[str, Any] = get_dataloaders(lowercase__ , lowercase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=lowercase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__ : Any = model.to(accelerator.device ) # Instantiate optimizer lowercase__ : Union[str, Any] = AdamW(params=model.parameters() , lr=lowercase__ ) # Instantiate scheduler lowercase__ : Dict = get_linear_schedule_with_warmup( optimizer=lowercase__ , num_warmup_steps=100 , num_training_steps=(len(lowercase__ ) * num_epochs) , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = accelerator.prepare( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) # Now we train the model for epoch in range(lowercase__ ): model.train() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) # New code # # We use the new `accumulate` context manager to perform gradient accumulation # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests. 
with accelerator.accumulate(lowercase__ ): lowercase__ : Optional[Any] = model(**lowercase__ ) lowercase__ : List[str] = output.loss accelerator.backward(lowercase__ ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowercase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowercase__ : Optional[Any] = model(**lowercase__ ) lowercase__ : List[Any] = outputs.logits.argmax(dim=-1 ) lowercase__ , lowercase__ : List[str] = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=lowercase__ , references=lowercase__ , ) lowercase__ : int = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f"""epoch {epoch}:""" , lowercase__ ) def a_ ( ): '''simple docstring''' lowercase__ : Dict = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=lowercase__ , default=lowercase__ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) # New Code # parser.add_argument( '--gradient_accumulation_steps' , type=lowercase__ , default=1 , help='The number of minibatches to be ran before gradients are accumulated.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) lowercase__ : Any = parser.parse_args() lowercase__ : int = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(lowercase__ , lowercase__ ) if __name__ == "__main__": main()
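The core of the script above is the `accelerator.accumulate` context manager, so here is a minimal runnable sketch of that pattern on a toy regression problem; the model, data, and step counts are arbitrary stand-ins, not part of the example script.

import torch
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=4)
model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

data = [(torch.randn(2, 8), torch.randn(2, 1)) for _ in range(8)]
for inputs, targets in data:
    with accelerator.accumulate(model):   # grads sync only every 4th step
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()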
702
"""simple docstring""" import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } _UpperCamelCase : List[str] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): '''simple docstring''' for attribute in key.split('.' ): lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: lowercase__ : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase__ : Optional[Any] = value elif weight_type == "weight_g": lowercase__ : Dict = value elif weight_type == "weight_v": lowercase__ : List[str] = value elif weight_type == "bias": lowercase__ : Optional[Any] = value else: lowercase__ : List[str] = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Tuple = [] lowercase__ : List[str] = fairseq_model.state_dict() lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) lowercase__ : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue lowercase__ : int = True if "*" in mapped_key: lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' 
)[-2] lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase ) if "weight_g" in name: lowercase__ : List[Any] = 'weight_g' elif "weight_v" in name: lowercase__ : int = 'weight_v' elif "bias" in name: lowercase__ : Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ : Union[str, Any] = 'weight' else: lowercase__ : int = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : int = full_name.split('conv_layers.' )[-1] lowercase__ : int = name.split('.' ) lowercase__ : int = int(items[0] ) lowercase__ : Dict = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase__ : Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase__ : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) lowercase__ : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase__ : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ): '''simple docstring''' if config_path is not None: lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase ) else: lowercase__ : Any = UniSpeechSatConfig() lowercase__ : Union[str, Any] = '' if is_finetuned: lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase ) else: lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) lowercase__ : Union[str, Any] = model[0].eval() recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) if 
__name__ == "__main__": _UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCamelCase : str = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
645
0
"""simple docstring""" from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def a_ ( ): '''simple docstring''' lowercase__ : List[Any] = ArgumentParser('Transformers CLI tool' , usage='transformers-cli <command> [<args>]' ) lowercase__ : List[str] = parser.add_subparsers(help='transformers-cli command helpers' ) # Register commands ConvertCommand.register_subcommand(lowerCAmelCase_ ) DownloadCommand.register_subcommand(lowerCAmelCase_ ) EnvironmentCommand.register_subcommand(lowerCAmelCase_ ) RunCommand.register_subcommand(lowerCAmelCase_ ) ServeCommand.register_subcommand(lowerCAmelCase_ ) UserCommands.register_subcommand(lowerCAmelCase_ ) AddNewModelCommand.register_subcommand(lowerCAmelCase_ ) AddNewModelLikeCommand.register_subcommand(lowerCAmelCase_ ) LfsCommands.register_subcommand(lowerCAmelCase_ ) PTtoTFCommand.register_subcommand(lowerCAmelCase_ ) # Let's go lowercase__ : str = parser.parse_args() if not hasattr(lowerCAmelCase_ , 'func' ): parser.print_help() exit(1 ) # Run lowercase__ : Union[str, Any] = args.func(lowerCAmelCase_ ) service.run() if __name__ == "__main__": main()
703
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int: lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : Dict = image_size lowercase__ : str = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : List[str] = embed_dim lowercase__ : Any = depths lowercase__ : Dict = num_heads lowercase__ : List[str] = window_size lowercase__ : int = mlp_ratio lowercase__ : Tuple = qkv_bias lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Tuple = drop_path_rate lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = use_absolute_embeddings lowercase__ : Optional[Any] = patch_norm lowercase__ : Any = layer_norm_eps lowercase__ : List[Any] = initializer_range lowercase__ : List[str] = is_training lowercase__ : int = scope lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = type_sequence_label_size lowercase__ : List[str] = encoder_stride lowercase__ : Optional[Any] = out_features lowercase__ : Dict = out_indices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _UpperCAmelCase ( self , a , a , a ) -> Dict: lowercase__ : Tuple = MaskFormerSwinModel(config=a ) model.to(a ) model.eval() lowercase__ : str = model(a ) lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a ) model.to(a ) model.eval() lowercase__ : int = model(a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(a ): lowercase__ : Dict = ['stem'] lowercase__ : List[str] = MaskFormerSwinBackbone(config=a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase__ : str = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Any = False lowerCamelCase__ : Dict = False lowerCamelCase__ : int = False def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = MaskFormerSwinModelTester(self ) lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def _UpperCAmelCase ( self ) -> Optional[int]: pass def _UpperCAmelCase ( self ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCAmelCase ( self ) -> str: return def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a ) @unittest.skip('Swin does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: pass @unittest.skip('Swin does not support feedforward chunking' ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> str: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) lowercase__ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => 
so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def _UpperCAmelCase ( self ) -> List[Any]: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def _UpperCAmelCase ( self ) -> int: pass def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple: lowercase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[Any] = outputs.hidden_states lowercase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swin has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any] = 3 lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : int = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass def _UpperCAmelCase ( self ) -> Any: lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(a ): lowercase__ : Union[str, Any] = 0 return t def check_equivalence(a , a , a , a={} ): 
with torch.no_grad(): lowercase__ : Optional[Any] = model(**a , return_dict=a , **a ) lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple() def recursive_check(a , a ): if isinstance(a , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a , a ): recursive_check(a , a ) elif isinstance(a , a ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(a , a ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has""" f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}.""" ) , ) recursive_check(a , a ) for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) model.to(a ) model.eval() lowercase__ : Tuple = self._prepare_for_class(a , a ) lowercase__ : Optional[Any] = self._prepare_for_class(a , a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a ) lowercase__ : int = self._prepare_for_class(a , a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ ( unittest.TestCase , _a): lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Optional[int] = MaskFormerSwinModelTester(self ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: lowercase__ : Optional[Any] = backbone_class(a ) backbone.to(a ) backbone.eval() lowercase__ : Union[str, Any] = backbone(**a ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , a ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowercase__ : List[str] = backbone(**a , output_hidden_states=a ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowercase__ : List[Any] = backbone(**a , output_attentions=a ) self.assertIsNotNone(outputs.attentions )
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _UpperCamelCase : Dict = get_logger(__name__) _UpperCamelCase : int = Path(__file__).parent / "model_card_template.md" _UpperCamelCase : Optional[Any] = uuida().hex _UpperCamelCase : int = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES _UpperCamelCase : Optional[int] = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES _UpperCamelCase : Tuple = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/" def a_ ( _lowerCAmelCase : Union[Dict, str, None] = None ): '''simple docstring''' lowercase__ : List[Any] = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}""" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"""; torch/{_torch_version}""" if is_flax_available(): ua += f"""; jax/{_jax_version}""" ua += f"""; flax/{_flax_version}""" if is_onnx_available(): ua += f"""; onnxruntime/{_onnxruntime_version}""" # CI will set this value to True if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() ) elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): ua += "; " + user_agent return ua def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None ): '''simple docstring''' if token is None: lowercase__ : Optional[Any] = HfFolder.get_token() if organization is None: lowercase__ : Tuple = whoami(SCREAMING_SNAKE_CASE__ )["""name"""] return f"""{username}/{model_id}""" else: return f"""{organization}/{model_id}""" def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : str ): '''simple docstring''' if not is_jinja_available(): raise ValueError( 'Modelcard rendering is based on Jinja templates.' ' Please make sure to have `jinja` installed before using `create_model_card`.' ' To install it, please run `pip install Jinja2`.' 
) if hasattr(SCREAMING_SNAKE_CASE__ , 'local_rank' ) and args.local_rank not in [-1, 0]: return lowercase__ : Optional[int] = args.hub_token if hasattr(SCREAMING_SNAKE_CASE__ , 'hub_token' ) else None lowercase__ : Union[str, Any] = get_full_repo_name(SCREAMING_SNAKE_CASE__ , token=SCREAMING_SNAKE_CASE__ ) lowercase__ : Optional[Any] = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=SCREAMING_SNAKE_CASE__ , model_name=SCREAMING_SNAKE_CASE__ , repo_name=SCREAMING_SNAKE_CASE__ , dataset_name=args.dataset_name if hasattr(SCREAMING_SNAKE_CASE__ , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(SCREAMING_SNAKE_CASE__ , 'gradient_accumulation_steps' ) else None ) , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(SCREAMING_SNAKE_CASE__ , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(SCREAMING_SNAKE_CASE__ , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(SCREAMING_SNAKE_CASE__ , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(SCREAMING_SNAKE_CASE__ , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(SCREAMING_SNAKE_CASE__ , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(SCREAMING_SNAKE_CASE__ , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(SCREAMING_SNAKE_CASE__ , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(SCREAMING_SNAKE_CASE__ , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , ) lowercase__ : Tuple = os.path.join(args.output_dir , 'README.md' ) model_card.save(SCREAMING_SNAKE_CASE__ ) def a_ ( _lowerCAmelCase : Optional[str] , _lowerCAmelCase : Optional[str] = None ): '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash lowercase__ : Optional[int] = str(Path(SCREAMING_SNAKE_CASE__ ).as_posix() ) lowercase__ : List[str] = re.search(R'snapshots/([^/]+)/' , SCREAMING_SNAKE_CASE__ ) if search is None: return None lowercase__ : Union[str, Any] = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(SCREAMING_SNAKE_CASE__ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_UpperCamelCase : Optional[Any] = os.path.expanduser( os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface")) ) _UpperCamelCase : str = os.path.join(hf_cache_home, "diffusers") def a_ ( _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None ): '''simple docstring''' if new_cache_dir is None: lowercase__ : Tuple = DIFFUSERS_CACHE if old_cache_dir is None: lowercase__ : List[Any] = old_diffusers_cache lowercase__ : List[str] = Path(SCREAMING_SNAKE_CASE__ ).expanduser() lowercase__ : Union[str, Any] = Path(SCREAMING_SNAKE_CASE__ ).expanduser() for old_blob_path in old_cache_dir.glob('**/blobs/*' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): lowercase__ : Union[str, Any] = new_cache_dir / old_blob_path.relative_to(SCREAMING_SNAKE_CASE__ ) new_blob_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ ) os.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) try: os.symlink(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) except OSError: logger.warning( 'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _UpperCamelCase : Optional[int] = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt") if not os.path.isfile(cache_version_file): _UpperCamelCase : Union[str, Any] = 0 else: with open(cache_version_file) as f: try: _UpperCamelCase : Optional[Any] = int(f.read()) except ValueError: _UpperCamelCase : Any = 0 if cache_version < 1: _UpperCamelCase : int = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your " "existing cached models. This is a one-time operation, you can interrupt it or run it " "later by calling `diffusers.utils.hub_utils.move_cache()`." ) try: move_cache() except Exception as e: _UpperCamelCase : Any = "\n".join(traceback.format_tb(e.__traceback__)) logger.error( f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole " "message and we will do our best to help." ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, "w") as f: f.write("1") except Exception: logger.warning( f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' "the directory exists and can be written to." ) def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ): '''simple docstring''' if variant is not None: lowercase__ : int = weights_name.split('.' 
) lowercase__ : Dict = splits[:-1] + [variant] + splits[-1:] lowercase__ : int = """.""".join(SCREAMING_SNAKE_CASE__ ) return weights_name def a_ ( _lowerCAmelCase : Union[str, Any] , *, _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : int=None , ): '''simple docstring''' lowercase__ : int = str(SCREAMING_SNAKE_CASE__ ) if os.path.isfile(SCREAMING_SNAKE_CASE__ ): return pretrained_model_name_or_path elif os.path.isdir(SCREAMING_SNAKE_CASE__ ): if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ): # Load from a PyTorch checkpoint lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ): lowercase__ : List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) return model_file else: raise EnvironmentError( f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(SCREAMING_SNAKE_CASE__ ).base_version ) >= version.parse('0.20.0' ) ): try: lowercase__ : Tuple = hf_hub_download( SCREAMING_SNAKE_CASE__ , filename=_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , ) warnings.warn( f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.""" , SCREAMING_SNAKE_CASE__ , ) return model_file except: # noqa: E722 warnings.warn( f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )}\' so that the correct variant file can be added.""" , SCREAMING_SNAKE_CASE__ , ) try: # 2. 
Load model file as usual lowercase__ : List[str] = hf_hub_download( SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , force_download=SCREAMING_SNAKE_CASE__ , proxies=SCREAMING_SNAKE_CASE__ , resume_download=SCREAMING_SNAKE_CASE__ , local_files_only=SCREAMING_SNAKE_CASE__ , use_auth_token=SCREAMING_SNAKE_CASE__ , user_agent=SCREAMING_SNAKE_CASE__ , subfolder=SCREAMING_SNAKE_CASE__ , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """ 'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ' 'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ' 'login`.' ) except RevisionNotFoundError: raise EnvironmentError( f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """ 'this model name. Check the model page at ' f"""\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.""" ) except EntryNotFoundError: raise EnvironmentError( f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" ) except HTTPError as err: raise EnvironmentError( f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" ) except ValueError: raise EnvironmentError( f"""We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it""" f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a""" f""" directory containing a file named {weights_name} or""" ' \nCheckout your internet connection or see how to run the library in' ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' ) except EnvironmentError: raise EnvironmentError( f"""Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from """ '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ' f"""Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory """ f"""containing a file named {weights_name}""" )
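# A quick illustration of the variant-naming rule implemented by the helper
# above (the function name here is illustrative): the variant tag is spliced
# in just before the final extension, and the name is unchanged when no
# variant is requested.
from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is None:
        return weights_name
    parts = weights_name.split(".")
    return ".".join(parts[:-1] + [variant] + parts[-1:])

assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("diffusion_pytorch_model.bin") == "diffusion_pytorch_model.bin"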
"""simple docstring""" import math def a_ ( _lowerCAmelCase : int = 100 ): '''simple docstring''' lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) ) lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f'''{solution() = }''')
"""simple docstring""" from __future__ import absolute_import, division, print_function, unicode_literals from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers import RobertaConfig from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.roberta.modeling_roberta import ( ROBERTA_INPUTS_DOCSTRING, ROBERTA_START_DOCSTRING, RobertaEmbeddings, ) from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy @add_start_docstrings( "The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE): lowerCamelCase__ : int = RobertaConfig lowerCamelCase__ : int = "roberta" def __init__( self , a ) -> Optional[Any]: super().__init__(__snake_case ) lowercase__ : Optional[int] = RobertaEmbeddings(__snake_case ) self.init_weights() @add_start_docstrings( "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , __SCREAMING_SNAKE_CASE , ) class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE): lowerCamelCase__ : Any = RobertaConfig lowerCamelCase__ : int = "roberta" def __init__( self , a ) -> Optional[Any]: super().__init__(__snake_case ) lowercase__ : Any = config.num_labels lowercase__ : Union[str, Any] = config.num_hidden_layers lowercase__ : Optional[int] = DeeRobertaModel(__snake_case ) lowercase__ : Dict = nn.Dropout(config.hidden_dropout_prob ) lowercase__ : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels ) @add_start_docstrings_to_model_forward(__snake_case ) def _UpperCAmelCase ( self , a=None , a=None , a=None , a=None , a=None , a=None , a=None , a=-1 , a=False , ) -> Tuple: lowercase__ : List[str] = self.num_layers try: lowercase__ : Tuple = self.roberta( __snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , position_ids=__snake_case , head_mask=__snake_case , inputs_embeds=__snake_case , ) lowercase__ : int = outputs[1] lowercase__ : str = self.dropout(__snake_case ) lowercase__ : List[Any] = self.classifier(__snake_case ) lowercase__ : Optional[int] = (logits,) + outputs[2:] # add hidden states and attention if they are here except HighwayException as e: lowercase__ : Union[str, Any] = e.message lowercase__ : List[Any] = e.exit_layer lowercase__ : Union[str, Any] = outputs[0] if not self.training: lowercase__ : List[str] = entropy(__snake_case ) lowercase__ : Union[str, Any] = [] lowercase__ : Dict = [] if labels is not None: if self.num_labels == 1: # We are doing regression lowercase__ : List[str] = MSELoss() lowercase__ : Tuple = loss_fct(logits.view(-1 ) , labels.view(-1 ) ) else: lowercase__ : List[str] = CrossEntropyLoss() lowercase__ : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) # work with highway exits lowercase__ : List[Any] = [] for highway_exit in outputs[-1]: lowercase__ : Optional[int] = highway_exit[0] if not self.training: highway_logits_all.append(__snake_case ) highway_entropy.append(highway_exit[2] ) if self.num_labels == 1: # We are doing regression lowercase__ : Dict = MSELoss() lowercase__ : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) ) else: lowercase__ : int = CrossEntropyLoss() lowercase__ : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) highway_losses.append(__snake_case ) if train_highway: lowercase__ : Dict = (sum(highway_losses[:-1] ),) + outputs # exclude the final highway, of 
course else: lowercase__ : Union[str, Any] = (loss,) + outputs if not self.training: lowercase__ : List[str] = outputs + ((original_entropy, highway_entropy), exit_layer) if output_layer >= 0: lowercase__ : Union[str, Any] = ( (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:] ) # use the highway of the last layer return outputs # (loss), logits, (hidden_states), (attentions), entropy
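# A minimal sketch of the entropy criterion behind DeeBERT-style early exits
# (illustrative names, not the library API): an exit layer is taken only if
# the prediction entropy of its logits falls below a confidence threshold.
import torch
import torch.nn.functional as F

def prediction_entropy(logits: torch.Tensor) -> torch.Tensor:
    # logits: (batch, num_labels) -> entropy per example, shape (batch,)
    log_probs = F.log_softmax(logits, dim=-1)
    return -(log_probs.exp() * log_probs).sum(dim=-1)

def should_exit(logits: torch.Tensor, threshold: float = 0.5) -> bool:
    return bool((prediction_entropy(logits) < threshold).all())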
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def _UpperCAmelCase ( self ) -> Tuple: lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : List[Any] = controlnet_params lowercase__ : int = 'bird' lowercase__ : List[Any] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples ) lowercase__ : List[Any] = jax.random.PRNGKey(0 ) lowercase__ : Tuple = jax.random.split(a , jax.device_count() ) lowercase__ : str = replicate(a ) lowercase__ : List[str] = shard(a ) lowercase__ : Dict = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : Optional[Any] = jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> List[str]: lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : Optional[Any] = controlnet_params lowercase__ : List[Any] = 'Chef in the kitchen' lowercase__ : List[str] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) lowercase__ : List[str] = jax.random.PRNGKey(0 ) lowercase__ : str = jax.random.split(a , jax.device_count() ) lowercase__ : Optional[Any] = replicate(a ) lowercase__ : Optional[Any] = shard(a ) lowercase__ : List[Any] = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + 
images.shape[-3:] ) lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : str = jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
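# A small sketch of the replicate/shard pattern used in the Flax tests above:
# shard() reshapes the leading batch axis into (num_devices, per_device_batch,
# ...) so a pmap-compiled pipeline sees one slice per accelerator, while
# replicate() copies parameters to every device. Shapes here are illustrative.
import jax
import jax.numpy as jnp
from flax.training.common_utils import shard

n = jax.local_device_count()
prompt_ids = jnp.ones((n * 2, 77), dtype=jnp.int32)  # two samples per device
sharded = shard(prompt_ids)
assert sharded.shape == (n, 2, 77)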
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[str] = 'ZinengTang/tvlt-base' lowercase__ : Optional[int] = tempfile.mkdtemp() def _UpperCAmelCase ( self , **a ) -> List[str]: return TvltImageProcessor.from_pretrained(self.checkpoint , **a ) def _UpperCAmelCase ( self , **a ) -> Optional[int]: return TvltFeatureExtractor.from_pretrained(self.checkpoint , **a ) def _UpperCAmelCase ( self ) -> str: shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : int = self.get_image_processor() lowercase__ : int = self.get_feature_extractor() lowercase__ : Any = TvltProcessor(image_processor=a , feature_extractor=a ) processor.save_pretrained(self.tmpdirname ) lowercase__ : Optional[Any] = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , a ) self.assertIsInstance(processor.image_processor , a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : str = self.get_image_processor() lowercase__ : Tuple = self.get_feature_extractor() lowercase__ : Any = TvltProcessor(image_processor=a , feature_extractor=a ) lowercase__ : Tuple = np.ones([1_2_0_0_0] ) lowercase__ : int = feature_extractor(a , return_tensors='np' ) lowercase__ : str = processor(audio=a , return_tensors='np' ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = self.get_image_processor() lowercase__ : Dict = self.get_feature_extractor() lowercase__ : int = TvltProcessor(image_processor=a , feature_extractor=a ) lowercase__ : str = np.ones([3, 2_2_4, 2_2_4] ) lowercase__ : str = image_processor(a , return_tensors='np' ) lowercase__ : List[Any] = processor(images=a , return_tensors='np' ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : int = self.get_image_processor() lowercase__ : Dict = self.get_feature_extractor() lowercase__ : Tuple = TvltProcessor(image_processor=a , feature_extractor=a ) lowercase__ : Optional[Any] = np.ones([1_2_0_0_0] ) lowercase__ : Dict = np.ones([3, 2_2_4, 2_2_4] ) lowercase__ : Optional[int] = processor(audio=a , images=a ) self.assertListEqual(list(inputs.keys() ) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] ) # test if it raises when no input is passed with pytest.raises(a ): processor() def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Union[str, Any] = self.get_image_processor() lowercase__ : Optional[int] = self.get_feature_extractor() lowercase__ : Optional[int] = TvltProcessor(image_processor=a , feature_extractor=a ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _UpperCamelCase : str = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Any = ["MLukeTokenizer"] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) model.to(a ) from datasets import load_dataset lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' ) lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' ) lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : List[str] = model(**a ) lowercase__ : List[Any] = outputs.logits lowercase__ : Union[str, Any] = torch.Size((1, 1_6) ) self.assertEqual(logits.shape , a ) lowercase__ : Tuple = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging _UpperCamelCase : Any = logging.get_logger(__name__) _UpperCamelCase : int = "▁" _UpperCamelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model"} _UpperCamelCase : str = { "vocab_file": { "facebook/nllb-200-distilled-600M": ( "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model" ), } } _UpperCamelCase : Optional[int] = { "facebook/nllb-200-distilled-600M": 10_24, } # fmt: off _UpperCamelCase : Tuple = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"] class UpperCAmelCase_ ( UpperCAmelCase_): lowerCamelCase__ : Optional[Any] = VOCAB_FILES_NAMES lowerCamelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ : Optional[Any] = ["input_ids", "attention_mask"] lowerCamelCase__ : List[int] = [] lowerCamelCase__ : List[int] = [] def __init__( self , a , a="<s>" , a="</s>" , a="</s>" , a="<s>" , 
a="<unk>" , a="<pad>" , a="<mask>" , a=None , a=None , a=None , a = None , a=None , a=False , **a , ) -> Any: lowercase__ : Dict = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token lowercase__ : Any = {} if sp_model_kwargs is None else sp_model_kwargs lowercase__ : List[str] = legacy_behaviour super().__init__( bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , tokenizer_file=_snake_case , src_lang=_snake_case , tgt_lang=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=_snake_case , **_snake_case , ) lowercase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(_snake_case ) ) lowercase__ : List[str] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' # spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s' # Mimic fairseq token-to-id alignment for the first 4 token lowercase__ : Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase__ : int = 1 lowercase__ : List[str] = len(self.sp_model ) lowercase__ : List[Any] = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_snake_case ) } lowercase__ : Union[str, Any] = {v: k for k, v in self.lang_code_to_id.items()} lowercase__ : int = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowercase__ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowercase__ : Dict = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) lowercase__ : int = src_lang if src_lang is not None else 'eng_Latn' lowercase__ : Tuple = self.lang_code_to_id[self._src_lang] lowercase__ : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> str: lowercase__ : str = self.__dict__.copy() lowercase__ : int = None lowercase__ : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , a ) -> Tuple: lowercase__ : Union[str, Any] = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): lowercase__ : str = {} lowercase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def _UpperCAmelCase ( self ) -> int: return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _UpperCAmelCase ( self ) -> Optional[int]: return self._src_lang @src_lang.setter def _UpperCAmelCase ( self , a ) -> Dict: lowercase__ : Optional[int] = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def _UpperCAmelCase ( self , a , a = None , a = False ) -> Optional[Any]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_snake_case , token_ids_a=_snake_case , already_has_special_tokens=_snake_case ) lowercase__ : List[str] = [1] * len(self.prefix_tokens ) lowercase__ : Dict = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_snake_case )) + suffix_ones return prefix_ones + ([0] * len(_snake_case )) + ([0] * len(_snake_case )) + suffix_ones def _UpperCAmelCase ( self , a , a = None ) -> int: if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _UpperCAmelCase ( self , a , a = None ) -> Dict: lowercase__ : List[Any] = [self.sep_token_id] lowercase__ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self , a , a , a , a , **a ) -> str: if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) lowercase__ : Optional[Any] = src_lang lowercase__ : Union[str, Any] = self(_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , **_snake_case ) lowercase__ : Any = self.convert_tokens_to_ids(_snake_case ) lowercase__ : str = tgt_lang_id return inputs def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Tuple = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _UpperCAmelCase ( self , a ) -> Optional[int]: return self.sp_model.encode(_snake_case , out_type=_snake_case ) def _UpperCAmelCase ( self , a ) -> Optional[int]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase__ : str = self.sp_model.PieceToId(_snake_case ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _UpperCAmelCase ( self , a ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return 
self.sp_model.IdToPiece(index - self.fairseq_offset ) def _UpperCAmelCase ( self , a ) -> List[str]: lowercase__ : str = ''.join(_snake_case ).replace(_snake_case , ' ' ).strip() return out_string def _UpperCAmelCase ( self , a , a = None ) -> Any: if not os.path.isdir(_snake_case ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowercase__ : Optional[int] = os.path.join( _snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , _snake_case ) elif not os.path.isfile(self.vocab_file ): with open(_snake_case , 'wb' ) as fi: lowercase__ : List[Any] = self.sp_model.serialized_model_proto() fi.write(_snake_case ) return (out_vocab_file,) def _UpperCAmelCase ( self , a , a = "eng_Latn" , a = None , a = "fra_Latn" , **a , ) -> Optional[int]: lowercase__ : str = src_lang lowercase__ : List[Any] = tgt_lang return super().prepare_seqaseq_batch(_snake_case , _snake_case , **_snake_case ) def _UpperCAmelCase ( self ) -> Optional[int]: return self.set_src_lang_special_tokens(self.src_lang ) def _UpperCAmelCase ( self ) -> List[str]: return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _UpperCAmelCase ( self , a ) -> Optional[Any]: lowercase__ : int = self.lang_code_to_id[src_lang] if self.legacy_behaviour: lowercase__ : List[str] = [] lowercase__ : int = [self.eos_token_id, self.cur_lang_code] else: lowercase__ : List[str] = [self.cur_lang_code] lowercase__ : str = [self.eos_token_id] def _UpperCAmelCase ( self , a ) -> List[str]: lowercase__ : Tuple = self.lang_code_to_id[lang] if self.legacy_behaviour: lowercase__ : Dict = [] lowercase__ : List[Any] = [self.eos_token_id, self.cur_lang_code] else: lowercase__ : Dict = [self.cur_lang_code] lowercase__ : Any = [self.eos_token_id]
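# A hedged usage sketch for the tokenizer above (requires network access to
# download "facebook/nllb-200-distilled-600M"): src_lang and tgt_lang select
# the language codes that are attached as special tokens around the sequence.
from transformers import NllbTokenizer

tokenizer = NllbTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
)
inputs = tokenizer("Hello, world!", return_tensors="pt")
# With the default (non-legacy) behaviour, the source language code leads the
# sequence and </s> closes it.
print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0]))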
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCAmelCase_ : @staticmethod def _UpperCAmelCase ( *a , **a ) -> int: pass def a_ ( _lowerCAmelCase : Image ): '''simple docstring''' lowercase__ : List[str] = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCAmelCase_ ( unittest.TestCase): lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def _UpperCAmelCase ( self , a , a , a ) -> Dict: lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , a , a ) -> Optional[int]: lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' ) self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a ) import datasets lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) lowercase__ : List[Any] = depth_estimator( [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] ) self.assertEqual( [ {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, ] , a , ) @require_tf @unittest.skip('Depth estimation is not implemented in TF' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Tuple = 'Intel/dpt-large' lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a ) lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' ) lowercase__ : Optional[Any] = hashimage(outputs['depth'] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 ) @require_torch def _UpperCAmelCase ( self ) -> Optional[int]: # This is highly irregular to have no small tests. self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
"""simple docstring""" from functools import lru_cache def a_ ( _lowerCAmelCase : Any ) -> Optional[Any]: '''simple docstring''' lowercase__ : Optional[Any] = 2 lowercase__ : List[str] = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(_lowerCAmelCase ) if n > 1: factors.add(_lowerCAmelCase ) return factors @lru_cache def a_ ( _lowerCAmelCase : Optional[Any] ) -> Optional[int]: '''simple docstring''' return len(unique_prime_factors(_lowerCAmelCase ) ) def a_ ( _lowerCAmelCase : Tuple ) -> List[str]: '''simple docstring''' return len(set(_lowerCAmelCase ) ) in (0, 1) def a_ ( _lowerCAmelCase : Optional[Any] ) -> int: '''simple docstring''' lowercase__ : str = 2 while True: # Increment each value of a generated range lowercase__ : Union[str, Any] = [base + i for i in range(_lowerCAmelCase )] # Run elements through out unique_prime_factors function # Append our target number to the end. lowercase__ : Optional[int] = [upf_len(_lowerCAmelCase ) for x in group] checker.append(_lowerCAmelCase ) # If all numbers in the list are equal, return the group variable. if equality(_lowerCAmelCase ): return group # Increment our base variable by 1 base += 1 def a_ ( _lowerCAmelCase : Dict = 4 ) -> str: '''simple docstring''' lowercase__ : Union[str, Any] = run(_lowerCAmelCase ) return results[0] if len(_lowerCAmelCase ) else None if __name__ == "__main__": print(solution())
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class UpperCAmelCase_ ( _a): def __init__( self ) -> Any: lowercase__ : Tuple = [] def _UpperCAmelCase ( self , a , a , a , **a ) -> Any: self.events.append('on_init_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]: self.events.append('on_train_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]: self.events.append('on_train_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_epoch_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]: self.events.append('on_epoch_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_step_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> str: self.events.append('on_step_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_evaluate' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple: self.events.append('on_predict' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]: self.events.append('on_save' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]: self.events.append('on_log' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Any: self.events.append('on_prediction_step' ) @require_torch class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> str: lowercase__ : str = tempfile.mkdtemp() def _UpperCAmelCase ( self ) -> Dict: shutil.rmtree(self.output_dir ) def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
lowercase__ : str = RegressionDataset(length=a ) lowercase__ : Any = RegressionDataset(length=a ) lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a ) lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a ) lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a ) return Trainer( a , a , train_dataset=a , eval_dataset=a , callbacks=a , ) def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]: self.assertEqual(len(a ) , len(a ) ) # Order doesn't matter lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ ) lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ ) for cba, cba in zip(a , a ): if isinstance(a , a ) and isinstance(a , a ): self.assertEqual(a , a ) elif isinstance(a , a ) and not isinstance(a , a ): self.assertEqual(a , cba.__class__ ) elif not isinstance(a , a ) and isinstance(a , a ): self.assertEqual(cba.__class__ , a ) else: self.assertEqual(a , a ) def _UpperCAmelCase ( self , a ) -> Optional[Any]: lowercase__ : Dict = ['on_init_end', 'on_train_begin'] lowercase__ : List[Any] = 0 lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() ) lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs ): expected_events.append('on_epoch_begin' ) for _ in range(a ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save' ) expected_events.append('on_epoch_end' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : int = self.get_trainer() lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # Callbacks passed at init are added to the default callbacks lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a ) lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase__ : List[str] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(a ) expected_callbacks.remove(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) lowercase__ : Optional[Any] = self.get_trainer() lowercase__ : List[Any] = trainer.pop_callback(a ) self.assertEqual(cb.__class__ , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) trainer.add_callback(a ) expected_callbacks.insert(0 , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # We can also add, pop, or remove by instance lowercase__ : int = self.get_trainer() 
lowercase__ : List[str] = trainer.callback_handler.callbacks[0] trainer.remove_callback(a ) expected_callbacks.remove(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) lowercase__ : Tuple = self.get_trainer() lowercase__ : Dict = trainer.callback_handler.callbacks[0] lowercase__ : Union[str, Any] = trainer.pop_callback(a ) self.assertEqual(a , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) trainer.add_callback(a ) expected_callbacks.insert(0 , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) def _UpperCAmelCase ( self ) -> Tuple: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' , category=a ) lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() lowercase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # Independent log/save/eval lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() lowercase__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' ) trainer.train() lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' ) trainer.train() lowercase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # A bit of everything lowercase__ : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , ) trainer.train() lowercase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning' ) as warn_mock: lowercase__ : str = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(a ) in warn_mock.call_args[0][0]
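# A minimal, runnable sketch of the callback mechanism exercised above: a user
# callback subclasses TrainerCallback, overrides only the hooks it cares
# about, and is registered via the Trainer's `callbacks` argument. The class
# name and printed format are illustrative.
from transformers import TrainerCallback

class LossLoggerCallback(TrainerCallback):
    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            print(f"step {state.global_step}: loss={logs['loss']:.4f}")

# trainer = Trainer(model, args, train_dataset=ds, callbacks=[LossLoggerCallback()])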
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer _UpperCamelCase : Tuple =logging.get_logger(__name__) _UpperCamelCase : Any ={"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart _UpperCamelCase : Any ={ "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } _UpperCamelCase : List[str] ={ "facebook/bart-base": 10_24, "facebook/bart-large": 10_24, "facebook/bart-large-mnli": 10_24, "facebook/bart-large-cnn": 10_24, "facebook/bart-large-xsum": 10_24, "yjernite/bart_eli5": 10_24, } class UpperCAmelCase_ ( UpperCamelCase__): lowerCamelCase__ : int = VOCAB_FILES_NAMES lowerCamelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ : Optional[int] = ["input_ids", "attention_mask"] lowerCamelCase__ : Optional[Any] = BartTokenizer def __init__( self , a=None , a=None , a=None , a="replace" , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a=False , a=True , **a , ) -> Tuple: super().__init__( _a , _a , tokenizer_file=_a , errors=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , trim_offsets=_a , **_a , ) lowercase__ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' , _a ) != add_prefix_space: lowercase__ : Tuple = 
getattr(_a , pre_tok_state.pop('type' ) ) lowercase__ : Union[str, Any] = add_prefix_space lowercase__ : List[str] = pre_tok_class(**_a ) lowercase__ : List[Any] = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowercase__ : int = """post_processor""" lowercase__ : Optional[Any] = getattr(self.backend_tokenizer , _a , _a ) if tokenizer_component_instance: lowercase__ : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase__ : str = tuple(state['sep'] ) if "cls" in state: lowercase__ : List[str] = tuple(state['cls'] ) lowercase__ : str = False if state.get('add_prefix_space' , _a ) != add_prefix_space: lowercase__ : Any = add_prefix_space lowercase__ : Dict = True if state.get('trim_offsets' , _a ) != trim_offsets: lowercase__ : List[Any] = trim_offsets lowercase__ : Union[str, Any] = True if changes_to_apply: lowercase__ : List[str] = getattr(_a , state.pop('type' ) ) lowercase__ : int = component_class(**_a ) setattr(self.backend_tokenizer , _a , _a ) @property def _UpperCAmelCase ( self ) -> str: if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self , a ) -> Tuple: lowercase__ : Union[str, Any] = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else value lowercase__ : Any = value def _UpperCAmelCase ( self , *a , **a ) -> BatchEncoding: lowercase__ : Optional[int] = kwargs.get('is_split_into_words' , _a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._batch_encode_plus(*_a , **_a ) def _UpperCAmelCase ( self , *a , **a ) -> BatchEncoding: lowercase__ : str = kwargs.get('is_split_into_words' , _a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ 'to use it with pretokenized inputs.' ) return super()._encode_plus(*_a , **_a ) def _UpperCAmelCase ( self , a , a = None ) -> Tuple[str]: lowercase__ : Union[str, Any] = self._tokenizer.model.save(_a , name=_a ) return tuple(_a ) def _UpperCAmelCase ( self , a , a=None ) -> Tuple: lowercase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self , a , a = None ) -> List[int]: lowercase__ : List[Any] = [self.sep_token_id] lowercase__ : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
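# A hedged usage sketch for the fast BART tokenizer above (downloads
# "facebook/bart-base"): single sequences are wrapped as <s> ... </s>, and the
# mask token keeps its left whitespace (lstrip) so fill-mask prompts tokenize
# cleanly.
from transformers import BartTokenizerFast

tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
enc = tokenizer("Hello world", return_tensors="pt")
print(tokenizer.convert_ids_to_tokens(enc["input_ids"][0]))  # ['<s>', 'Hello', 'Gworld', '</s>'] with the BPE space marker on 'world'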
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _UpperCamelCase : str = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Tuple = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Dict = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys _UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" from pathlib import Path from typing import List from transformers import is_torch_available, is_vision_available from transformers.testing_utils import get_tests_dir, is_tool_test from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText if is_torch_available(): import torch if is_vision_available(): from PIL import Image _UpperCamelCase : int = ['text', 'image', 'audio'] def a_ ( _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : Union[str, Any] = [] for input_type in input_types: if input_type == "text": inputs.append('Text input' ) elif input_type == "image": inputs.append( Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) ) elif input_type == "audio": inputs.append(torch.ones(3000 ) ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): inputs.append(create_inputs(__UpperCamelCase ) ) else: raise ValueError(f"""Invalid type requested: {input_type}""" ) return inputs def a_ ( _lowerCAmelCase : List ): '''simple docstring''' lowercase__ : Any = [] for output in outputs: if isinstance(__UpperCamelCase , (str, AgentText) ): output_types.append('text' ) elif isinstance(__UpperCamelCase , (Image.Image, AgentImage) ): output_types.append('image' ) elif isinstance(__UpperCamelCase , (torch.Tensor, AgentAudio) ): output_types.append('audio' ) else: raise ValueError(f"""Invalid output: {output}""" ) return output_types @is_tool_test class UpperCAmelCase_ : def _UpperCAmelCase ( self ) -> Optional[Any]: self.assertTrue(hasattr(self.tool , 'inputs' ) ) self.assertTrue(hasattr(self.tool , 'outputs' ) ) lowercase__ : Dict = self.tool.inputs for _input in inputs: if isinstance(_input , UpperCamelCase__ ): for __input in _input: self.assertTrue(__input in authorized_types ) else: self.assertTrue(_input in authorized_types ) lowercase__ : int = self.tool.outputs for _output in outputs: self.assertTrue(_output in authorized_types ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Optional[Any] = create_inputs(self.tool.inputs ) lowercase__ : Tuple = self.tool(*UpperCamelCase__ ) # There is a single output if len(self.tool.outputs ) == 1: lowercase__ : Union[str, Any] = [outputs] self.assertListEqual(output_types(UpperCamelCase__ ) , self.tool.outputs ) def _UpperCAmelCase ( self ) -> Optional[Any]: self.assertTrue(hasattr(self.tool , 'description' ) ) self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) ) self.assertTrue(self.tool.description.startswith('This is a tool that' ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Any = create_inputs(self.tool.inputs ) lowercase__ : Optional[Any] = self.tool(*UpperCamelCase__ ) if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowercase__ : List[Any] = [outputs] self.assertEqual(len(UpperCamelCase__ ) , len(self.tool.outputs ) ) for output, output_type in zip(UpperCamelCase__ , self.tool.outputs ): lowercase__ : List[Any] = AGENT_TYPE_MAPPING[output_type] self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : Dict = create_inputs(self.tool.inputs ) lowercase__ : str = [] for _input, input_type in zip(UpperCamelCase__ , self.tool.inputs ): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] ) else: _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) ) # Should not raise an error lowercase__ : Union[str, Any] = 
self.tool(*UpperCamelCase__ ) if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowercase__ : List[Any] = [outputs] self.assertEqual(len(UpperCamelCase__ ) , len(self.tool.outputs ) )
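# A small self-contained sketch of the type-inspection idea behind
# `output_types` above, restricted to built-in types so it runs without
# torch or PIL; the bytes-as-audio mapping is purely illustrative.
def _classify_sketch(outputs):
    kinds = []
    for output in outputs:
        if isinstance(output, str):
            kinds.append("text")
        elif isinstance(output, (bytes, bytearray)):
            kinds.append("audio")
        else:
            raise ValueError(f"Invalid output: {output!r}")
    return kinds


assert _classify_sketch(["Text input", b"\x00\x01"]) == ["text", "audio"]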
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self , a ) -> str: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): lowercase__ : str = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = 'sshleifer/tiny-gpt2' lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , ) lowercase__ : str = TensorFlowBenchmark(a ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> int: lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Any = 'sshleifer/tiny-gpt2' lowercase__ : List[Any] = AutoConfig.from_pretrained(a ) lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , ) lowercase__ : Tuple = TensorFlowBenchmark(a , [config] ) lowercase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : List[str] = AutoConfig.from_pretrained(a ) lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : List[str] = TensorFlowBenchmark(a , [config] ) lowercase__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : Tuple = benchmark.run() 
self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : Optional[int] = AutoConfig.from_pretrained(a ) lowercase__ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : str = TensorFlowBenchmark(a , [config] ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random' lowercase__ : Any = AutoConfig.from_pretrained(a ) lowercase__ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : int = TensorFlowBenchmark(a , configs=[config] ) lowercase__ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : Any = 'sshleifer/tiny-gpt2' lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , ) lowercase__ : Any = TensorFlowBenchmark(a ) lowercase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Any = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , ) lowercase__ : Union[str, Any] = TensorFlowBenchmark(a ) benchmark.run() self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Tuple = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(a ): self.assertTrue(hasattr(a , 'sequential' ) ) self.assertTrue(hasattr(a , 'cumulative' ) ) self.assertTrue(hasattr(a , 'current' ) ) self.assertTrue(hasattr(a , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , ) lowercase__ : Optional[int] = TensorFlowBenchmark(a ) lowercase__ : Optional[Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
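# A hedged usage sketch of the benchmark API exercised by the tests above;
# running it requires TensorFlow and downloads a tiny checkpoint, so it is
# kept behind a __main__ guard. The flag values here are illustrative.
if __name__ == "__main__" and is_tf_available():
    demo_args = TensorFlowBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"],
        inference=True,
        training=False,
        sequence_lengths=[8],
        batch_sizes=[1],
        multi_process=False,
    )
    print(TensorFlowBenchmark(demo_args).run())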
"""simple docstring""" import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def a_ ( _lowerCAmelCase : int ): '''simple docstring''' if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class UpperCAmelCase_ ( nn.Module): def __init__( self , a , a ) -> int: super().__init__() lowercase__ : Any = module lowercase__ : int = nn.Sequential( nn.Linear(module.in_features , _lowerCamelCase , bias=_lowerCamelCase ) , nn.Linear(_lowerCamelCase , module.out_features , bias=_lowerCamelCase ) , ) lowercase__ : Dict = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=_lowerCamelCase ) nn.init.zeros_(self.adapter[1].weight ) self.adapter.to(module.weight.device ) def _UpperCAmelCase ( self , a , *a , **a ) -> Dict: return self.module(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) + self.adapter(_lowerCamelCase ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class UpperCAmelCase_ ( unittest.TestCase): lowerCamelCase__ : Any = "bigscience/bloom-1b7" # Constant values lowerCamelCase__ : str = 2.109659552692574 lowerCamelCase__ : Tuple = "Hello my name is" lowerCamelCase__ : Any = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. 
I") EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n") EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University") lowerCamelCase__ : List[str] = 1_0 def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : List[str] = AutoTokenizer.from_pretrained(self.model_name ) class UpperCAmelCase_ ( lowerCAmelCase__): def _UpperCAmelCase ( self ) -> str: super().setUp() # Models and tokenizer lowercase__ : Dict = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map='auto' ) lowercase__ : List[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map='auto' ) def _UpperCAmelCase ( self ) -> int: del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> str: lowercase__ : List[Any] = self.model_abit.config self.assertTrue(hasattr(_lowerCamelCase , 'quantization_config' ) ) lowercase__ : Optional[Any] = config.to_dict() lowercase__ : Optional[Any] = config.to_diff_dict() lowercase__ : Any = config.to_json_string() def _UpperCAmelCase ( self ) -> Union[str, Any]: from bitsandbytes.nn import Paramsabit lowercase__ : Tuple = self.model_fpaa.get_memory_footprint() lowercase__ : Any = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE ) lowercase__ : Any = get_some_linear_layer(self.model_abit ) self.assertTrue(linear.weight.__class__ == Paramsabit ) def _UpperCAmelCase ( self ) -> Tuple: from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(_lowerCamelCase , torch.nn.Linear ): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == torch.uinta ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[str] = self.tokenizer(self.input_text , return_tensors='pt' ) lowercase__ : Dict = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = BitsAndBytesConfig() lowercase__ : Any = True lowercase__ : str = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_lowerCamelCase , device_map='auto' ) lowercase__ : Dict = self.tokenizer(self.input_text , return_tensors='pt' ) lowercase__ : Optional[int] = model_abit_from_config.generate( input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS ) def _UpperCAmelCase ( self ) -> Any: with self.assertRaises(_lowerCamelCase ), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(_lowerCamelCase ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Optional[Any] = BitsAndBytesConfig() with self.assertRaises(_lowerCamelCase ): lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=_lowerCamelCase , load_in_abit=_lowerCamelCase , device_map='auto' , bnb_abit_quant_type='nf4' , ) def _UpperCAmelCase ( self ) -> Optional[int]: with self.assertRaises(_lowerCamelCase ): # Tries with `str` self.model_abit.to('cpu' ) with 
self.assertRaises(_lowerCamelCase ): # Tries with a `dtype`` self.model_abit.to(torch.floataa ) with self.assertRaises(_lowerCamelCase ): # Tries with a `device` self.model_abit.to(torch.device('cuda:0' ) ) with self.assertRaises(_lowerCamelCase ): # Tries with a `device` self.model_abit.float() with self.assertRaises(_lowerCamelCase ): # Tries with a `device` self.model_abit.half() # Test if we did not break anything lowercase__ : List[str] = self.tokenizer(self.input_text , return_tensors='pt' ) lowercase__ : Union[str, Any] = self.model_fpaa.to(torch.floataa ) lowercase__ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 ) # Check this does not throw an error lowercase__ : Union[str, Any] = self.model_fpaa.to('cpu' ) # Check this does not throw an error lowercase__ : Optional[Any] = self.model_fpaa.half() # Check this does not throw an error lowercase__ : Dict = self.model_fpaa.float() def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[Any] = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_lowerCamelCase , device_map='auto' ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa ) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class UpperCAmelCase_ ( unittest.TestCase): @classmethod def _UpperCAmelCase ( cls ) -> List[Any]: lowercase__ : List[str] = 't5-small' lowercase__ : Union[str, Any] = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense lowercase__ : Any = AutoTokenizer.from_pretrained(cls.model_name ) lowercase__ : Tuple = 'Translate in German: Hello, my dog is cute' def _UpperCAmelCase ( self ) -> str: gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> List[Any]: from transformers import TaForConditionalGeneration lowercase__ : Dict = TaForConditionalGeneration._keep_in_fpaa_modules lowercase__ : Tuple = None # test with `t5-small` lowercase__ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map='auto' ) lowercase__ : List[Any] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) lowercase__ : Any = model.generate(**_lowerCamelCase ) # test with `flan-t5-small` lowercase__ : Dict = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_lowerCamelCase , device_map='auto' ) lowercase__ : Optional[int] = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) lowercase__ : Tuple = model.generate(**_lowerCamelCase ) lowercase__ : str = modules def _UpperCAmelCase ( self ) -> List[Any]: import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` lowercase__ : List[Any] = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map='auto' ) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) ) lowercase__ : Tuple = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) lowercase__ : str = model.generate(**_lowerCamelCase ) # test with `flan-t5-small` lowercase__ : Optional[int] = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=_lowerCamelCase , device_map='auto' ) lowercase__ : int = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 ) lowercase__ : Union[str, Any] = model.generate(**_lowerCamelCase ) class UpperCAmelCase_ ( 
lowerCAmelCase__): def _UpperCAmelCase ( self ) -> Union[str, Any]: super().setUp() # model_name lowercase__ : Optional[Any] = 'bigscience/bloom-560m' lowercase__ : Optional[int] = 't5-small' # Different types of model lowercase__ : Optional[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map='auto' ) # Sequence classification model lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=_lowerCamelCase , device_map='auto' ) # CausalLM model lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase , device_map='auto' ) # Seq2seq model lowercase__ : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=_lowerCamelCase , device_map='auto' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: del self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> str: from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit ) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter ) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter ) class UpperCAmelCase_ ( lowerCAmelCase__): def _UpperCAmelCase ( self ) -> Tuple: super().setUp() def _UpperCAmelCase ( self ) -> Dict: del self.pipe gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Any = pipeline( 'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass lowercase__ : Optional[Any] = self.pipe(self.input_text ) self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS ) @require_torch_multi_gpu class UpperCAmelCase_ ( lowerCAmelCase__): def _UpperCAmelCase ( self ) -> Optional[Any]: super().setUp() def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=_lowerCamelCase , device_map='balanced' ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} ) # Check that inference pass works on the model lowercase__ : Any = self.tokenizer(self.input_text , return_tensors='pt' ) # Second real batch lowercase__ : Dict = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=1_0 ) self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_lowerCamelCase ) , self.EXPECTED_OUTPUTS ) class UpperCAmelCase_ ( lowerCAmelCase__): def _UpperCAmelCase ( self ) -> int: lowercase__ : Tuple = 'facebook/opt-350m' super().setUp() def _UpperCAmelCase ( self ) -> str: if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ): return # Step 1: freeze all parameters lowercase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_lowerCamelCase ) self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} ) for param in model.parameters(): lowercase__ : Optional[Any] = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. 
layernorm) to fp32 for stability lowercase__ : List[str] = param.data.to(torch.floataa ) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(_lowerCamelCase ) ): lowercase__ : List[Any] = LoRALayer(module.q_proj , rank=1_6 ) lowercase__ : str = LoRALayer(module.k_proj , rank=1_6 ) lowercase__ : Optional[int] = LoRALayer(module.v_proj , rank=1_6 ) # Step 3: dummy batch lowercase__ : Optional[int] = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 ) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): lowercase__ : List[str] = model.forward(**_lowerCamelCase ) out.logits.norm().backward() for module in model.modules(): if isinstance(_lowerCamelCase , _lowerCamelCase ): self.assertTrue(module.adapter[1].weight.grad is not None ) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 ) elif isinstance(_lowerCamelCase , nn.Embedding ): self.assertTrue(module.weight.grad is None ) class UpperCAmelCase_ ( lowerCAmelCase__): lowerCamelCase__ : Dict = "gpt2-xl" lowerCamelCase__ : Union[str, Any] = 3.3191854854152187
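# A minimal, self-contained sketch of the low-rank adapter idea used by the
# LoRALayer above: y = W x + B(A x), with A and B much smaller than W. Only
# plain torch is assumed; the names and rank value are illustrative.
if is_torch_available():

    class TinyLoRASketch(nn.Module):
        def __init__(self, base, rank=4):
            super().__init__()
            self.base = base  # the frozen pretrained projection
            self.down = nn.Linear(base.in_features, rank, bias=False)
            self.up = nn.Linear(rank, base.out_features, bias=False)
            nn.init.zeros_(self.up.weight)  # adapter starts as an exact no-op

        def forward(self, x):
            return self.base(x) + self.up(self.down(x))

    _layer = TinyLoRASketch(nn.Linear(8, 8))
    _x = torch.randn(2, 8)
    assert torch.allclose(_layer(_x), _layer.base(_x))  # zero init => identical output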
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class UpperCAmelCase_ ( _a): def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any: lowercase__ : Tuple = parent lowercase__ : List[Any] = batch_size lowercase__ : List[Any] = seq_length lowercase__ : List[Any] = is_training lowercase__ : Optional[Any] = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : int = use_labels lowercase__ : Tuple = vocab_size lowercase__ : int = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : Optional[Any] = intermediate_size lowercase__ : Optional[Any] = hidden_act lowercase__ : List[str] = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : List[Any] = max_position_embeddings lowercase__ : List[str] = type_vocab_size lowercase__ : Tuple = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = num_labels lowercase__ : Tuple = num_choices lowercase__ : str = scope def _UpperCAmelCase ( self ) -> Any: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_input_mask: lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Dict = None lowercase__ : Optional[Any] = None lowercase__ : int = None if self.use_labels: lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> Optional[int]: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict: lowercase__ : Tuple = DistilBertModel(config=a ) model.to(a ) model.eval() lowercase__ : Any = model(a , a ) lowercase__ : str = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict: lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a ) model.to(a ) model.eval() lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int: lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model( a , attention_mask=a , start_positions=a , end_positions=a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]: lowercase__ : int = self.num_labels lowercase__ : Dict = DistilBertForSequenceClassification(a ) model.to(a ) model.eval() lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any: lowercase__ : Any = self.num_labels lowercase__ : List[str] = DistilBertForTokenClassification(config=a ) model.to(a ) model.eval() lowercase__ : Any = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple: lowercase__ : List[Any] = self.num_choices lowercase__ : Any = DistilBertForMultipleChoice(config=a ) model.to(a ) model.eval() lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int = model( a , attention_mask=a , labels=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : List[str] = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ : str = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Any = True lowerCamelCase__ : List[Any] = True lowerCamelCase__ : Optional[Any] = True def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : str = DistilBertModelTester(self ) lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 ) def _UpperCAmelCase ( self ) -> Dict: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : 
str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = DistilBertModel.from_pretrained(a ) self.assertIsNotNone(a ) @slow @require_torch_gpu def _UpperCAmelCase ( self ) -> Any: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return lowercase__ : Optional[int] = True lowercase__ : Union[str, Any] = model_class(config=a ) lowercase__ : int = self._prepare_for_class(a , a ) lowercase__ : Tuple = torch.jit.trace( a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) ) lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a ) loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) ) @require_torch class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' ) lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase__ : Optional[Any] = model(a , attention_mask=a )[0] lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , a ) lowercase__ : List[Any] = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
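# A hedged re-implementation sketch of the fixture helpers the tester above
# relies on: `ids_tensor` draws random token ids and `random_attention_mask`
# guarantees at least one attended position per row (details may differ from
# the library versions; torch is assumed available, as imported above).
def _ids_tensor_sketch(shape, vocab_size):
    return torch.randint(0, vocab_size, shape, dtype=torch.long)


def _random_attention_mask_sketch(shape):
    mask = torch.randint(0, 2, shape, dtype=torch.long)
    mask[:, -1] = 1  # make sure no row is fully masked
    return mask


if is_torch_available():
    assert _ids_tensor_sketch((2, 5), vocab_size=99).shape == (2, 5)
    assert int(_random_attention_mask_sketch((2, 5))[:, -1].min()) == 1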
"""simple docstring""" def a_ ( _lowerCAmelCase : list ): '''simple docstring''' if any(not isinstance(__snake_case , __snake_case ) or x < 0 for x in sequence ): raise TypeError('Sequence must be list of non-negative integers' ) for _ in range(len(__snake_case ) ): for i, (rod_upper, rod_lower) in enumerate(zip(__snake_case , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
"""simple docstring""" from __future__ import annotations def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ): '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif stress < 0: raise ValueError('Stress cannot be negative' ) elif tangential_force < 0: raise ValueError('Tangential Force cannot be negative' ) elif area < 0: raise ValueError('Area cannot be negative' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Any ): '''simple docstring''' lowercase__ : Optional[Any] = len(a__ ) lowercase__ : int = [] for i in range(len(a__ ) - pat_len + 1 ): lowercase__ : str = True for j in range(a__ ): if s[i + j] != pattern[j]: lowercase__ : List[str] = False break if match_found: position.append(a__ ) return position if __name__ == "__main__": assert naive_pattern_search("ABCDEFG", "DE") == [3] print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any: lowercase__ : List[str] = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[int] = image_size lowercase__ : List[Any] = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : str = is_training lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[Any] = hidden_size lowercase__ : Dict = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[Any] = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Any = type_sequence_label_size lowercase__ : Dict = initializer_range lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Tuple = n_targets lowercase__ : Optional[int] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens def _UpperCAmelCase ( self ) -> Any: lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowercase__ : Tuple = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowercase__ : int = [] for i in range(self.batch_size ): lowercase__ : Optional[Any] = {} lowercase__ : Any = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=a ) lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a ) labels.append(a ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> List[Any]: return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def _UpperCAmelCase ( self , a , a , a ) -> int: lowercase__ : List[str] = YolosModel(config=a ) model.to(a ) model.eval() lowercase__ : List[Any] = model(a ) self.parent.assertEqual( result.last_hidden_state.shape , 
(self.batch_size, self.expected_seq_len, self.hidden_size) ) def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]: lowercase__ : str = YolosForObjectDetection(a ) model.to(a ) model.eval() lowercase__ : Dict = model(pixel_values=a ) lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowercase__ : str = model(pixel_values=a , labels=a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs lowercase__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCamelCase__ : List[str] = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) lowerCamelCase__ : List[Any] = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Tuple = False lowerCamelCase__ : Union[str, Any] = False def _UpperCAmelCase ( self , a , a , a=False ) -> Dict: lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowercase__ : Optional[Any] = [] for i in range(self.model_tester.batch_size ): lowercase__ : Dict = {} lowercase__ : Dict = torch.ones( size=(self.model_tester.n_targets,) , device=a , dtype=torch.long ) lowercase__ : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=a , dtype=torch.float ) labels.append(a ) lowercase__ : Union[str, Any] = labels return inputs_dict def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Dict = YolosModelTester(self ) lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: # YOLOS does not use inputs_embeds pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) lowercase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Tuple = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( 
self ) -> Dict: lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Dict = True # in YOLOS, the seq_len is different lowercase__ : Tuple = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : str = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : Optional[int] = True lowercase__ : List[Any] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[str] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : Dict = len(a ) # Check attention is always last and order is fine lowercase__ : Any = True lowercase__ : int = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = 1 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowercase__ : Tuple = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> List[str]: def check_hidden_states_output(a , a , a ): lowercase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(a , a ) ) lowercase__ : int = outputs.hidden_states lowercase__ : Any = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(a ) , a ) # YOLOS has a different seq_length lowercase__ : Optional[int] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[Any] = True check_hidden_states_output(a , a , a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*a ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : int = YolosModel.from_pretrained(a ) self.assertIsNotNone(a ) def a_ ( ): '''simple docstring''' lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Union[str, Any]: return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = 
YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a ) lowercase__ : Tuple = self.default_image_processor lowercase__ : Optional[int] = prepare_img() lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : int = model(inputs.pixel_values ) # verify outputs lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : Any = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , ) lowercase__ : List[str] = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) ) # verify postprocessing lowercase__ : Optional[Any] = image_processor.post_process_object_detection( a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a ) lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7] lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a ) self.assertEqual(len(results['scores'] ) , 5 ) self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) ) self.assertSequenceEqual(results['labels'].tolist() , a ) self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
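# A hedged end-to-end sketch of the inference path the integration test above
# verifies; it downloads the checkpoint, so it is kept behind a __main__ guard.
if __name__ == "__main__" and is_torch_available() and is_vision_available():
    processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
    model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
    image = prepare_img()
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    results = processor.post_process_object_detection(
        outputs, threshold=0.3, target_sizes=[image.size[::-1]]
    )[0]
    print(results["scores"], results["labels"])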
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _UpperCamelCase : List[Any] = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def a_ ( ): '''simple docstring''' lowercase__ : List[str] = os.path.dirname(os.path.realpath(UpperCamelCase__ ) ) lowercase__ : str = os.path.join(UpperCamelCase__ , 'words.txt' ) lowercase__ : str = '' with open(UpperCamelCase__ ) as f: lowercase__ : Dict = f.readline() lowercase__ : List[str] = [word.strip('\"' ) for word in words.strip('\r\n' ).split(',' )] lowercase__ : Union[str, Any] = [ word for word in [sum(ord(UpperCamelCase__ ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(UpperCamelCase__ ) if __name__ == "__main__": print(solution())
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch _UpperCamelCase : int = logging.get_logger(__name__) @dataclass class UpperCAmelCase_ : def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple: lowercase__ : str = load_in_abit lowercase__ : str = load_in_abit lowercase__ : List[str] = llm_inta_threshold lowercase__ : Dict = llm_inta_skip_modules lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload lowercase__ : Any = llm_inta_has_fpaa_weight lowercase__ : Any = bnb_abit_quant_type lowercase__ : Dict = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: lowercase__ : Dict = torch.floataa elif isinstance(a , a ): lowercase__ : Any = getattr(a , a ) elif isinstance(a , torch.dtype ): lowercase__ : Any = bnb_abit_compute_dtype else: raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' ) self.post_init() def _UpperCAmelCase ( self ) -> str: if not isinstance(self.llm_inta_threshold , a ): raise ValueError('llm_int8_threshold must be a float' ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ): raise ValueError('llm_int8_skip_modules must be a list of strings' ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ): raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' ) if not isinstance(self.llm_inta_has_fpaa_weight , a ): raise ValueError('llm_int8_has_fp16_weight must be a boolean' ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' ) if not isinstance(self.bnb_abit_quant_type , a ): raise ValueError('bnb_4bit_quant_type must be a string' ) if not isinstance(self.bnb_abit_use_double_quant , a ): raise ValueError('bnb_4bit_use_double_quant must be a boolean' ) if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse( '0.39.0' ): raise ValueError( '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' ) def _UpperCAmelCase ( self ) -> Tuple: return self.load_in_abit or self.load_in_abit def _UpperCAmelCase ( self ) -> List[str]: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]: lowercase__ : List[Any] = cls(**a ) lowercase__ : Union[str, Any] = [] for key, value in kwargs.items(): if hasattr(a , a ): setattr(a , a , a ) to_remove.append(a ) for key in to_remove: kwargs.pop(a , a ) if 
return_unused_kwargs: return config, kwargs else: return config def _UpperCAmelCase ( self , a ) -> Dict: with open(a , 'w' , encoding='utf-8' ) as writer: lowercase__ : Any = self.to_dict() lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n' writer.write(a ) def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1] return output def __repr__( self ) -> Dict: return f"""{self.__class__.__name__} {self.to_json_string()}""" def _UpperCAmelCase ( self , a = True ) -> str: if use_diff is True: lowercase__ : List[Any] = self.to_diff_dict() else: lowercase__ : List[str] = self.to_dict() return json.dumps(a , indent=2 , sort_keys=a ) + "\n" def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Tuple = self.to_dict() # get the default config dict lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict() lowercase__ : int = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: lowercase__ : Optional[int] = value return serializable_config_dict
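# A hedged round-trip sketch for the config above: serialize only the fields
# that differ from the defaults, then rebuild an equivalent config. Requires
# torch (for the compute-dtype default); the flag values are illustrative.
if is_torch_available():
    _cfg = BitsAndBytesConfig(load_in_8bit=True, llm_int8_threshold=4.0)
    _clone = BitsAndBytesConfig(**_cfg.to_diff_dict())
    assert _clone.load_in_8bit and _clone.llm_int8_threshold == 4.0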
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class UpperCAmelCase_ ( __UpperCAmelCase): def __init__( self , a = "▁" , a = True , a = "<unk>" , a = "</s>" , a = "<pad>" , ) -> Optional[Any]: lowercase__ : List[str] = { 'pad': {'id': 0, 'token': pad_token}, 'eos': {'id': 1, 'token': eos_token}, 'unk': {'id': 2, 'token': unk_token}, } lowercase__ : List[str] = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): lowercase__ : Tuple = token_dict['token'] lowercase__ : List[Any] = Tokenizer(Unigram() ) lowercase__ : str = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(' {2,}' ) , ' ' ), normalizers.Lowercase(), ] ) lowercase__ : Optional[int] = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ), pre_tokenizers.Digits(individual_digits=__SCREAMING_SNAKE_CASE ), pre_tokenizers.Punctuation(), ] ) lowercase__ : Any = decoders.Metaspace(replacement=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE ) lowercase__ : Union[str, Any] = TemplateProcessing( single=f"""$A {self.special_tokens["eos"]["token"]}""" , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , ) lowercase__ : str = { 'model': 'SentencePieceUnigram', 'replacement': replacement, 'add_prefix_space': add_prefix_space, } super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def _UpperCAmelCase ( self , a , a = 8_0_0_0 , a = True , ) -> Tuple: lowercase__ : List[str] = trainers.UnigramTrainer( vocab_size=__SCREAMING_SNAKE_CASE , special_tokens=self.special_tokens_list , show_progress=__SCREAMING_SNAKE_CASE , ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowercase__ : Union[str, Any] = [files] self._tokenizer.train(__SCREAMING_SNAKE_CASE , trainer=__SCREAMING_SNAKE_CASE ) self.add_unk_id() def _UpperCAmelCase ( self , a , a = 8_0_0_0 , a = True , ) -> Tuple: lowercase__ : List[str] = trainers.UnigramTrainer( vocab_size=__SCREAMING_SNAKE_CASE , special_tokens=self.special_tokens_list , show_progress=__SCREAMING_SNAKE_CASE , ) self._tokenizer.train_from_iterator(__SCREAMING_SNAKE_CASE , trainer=__SCREAMING_SNAKE_CASE ) self.add_unk_id() def _UpperCAmelCase ( self ) -> Dict: lowercase__ : int = json.loads(self._tokenizer.to_str() ) lowercase__ : int = self.special_tokens['unk']['id'] lowercase__ : List[str] = Tokenizer.from_str(json.dumps(__SCREAMING_SNAKE_CASE ) )
716
"""simple docstring""" import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _UpperCamelCase : int = 16 _UpperCamelCase : Union[str, Any] = 32 def a_ ( _lowerCAmelCase : Tuple ): '''simple docstring''' return int(x / 2**20 ) class UpperCAmelCase_ : def __enter__( self ) -> Union[str, Any]: gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowercase__ : List[str] = torch.cuda.memory_allocated() return self def __exit__( self , *a ) -> Any: gc.collect() torch.cuda.empty_cache() lowercase__ : Optional[Any] = torch.cuda.memory_allocated() lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated() lowercase__ : List[Any] = bamb(self.end - self.begin ) lowercase__ : List[Any] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ): '''simple docstring''' lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) lowercase__ : Union[str, Any] = load_dataset( 'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} ) def tokenize_function(_lowerCAmelCase : int ): # max_length=None => use the model max length (it's actually the default) lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ : Union[str, Any] = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowerCAmelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
lowercase__ : Dict = DataLoader( tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) lowercase__ : Dict = DataLoader( tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Optional[int] = config['lr'] lowercase__ : Optional[Any] = int(config['num_epochs'] ) lowercase__ : Optional[Any] = int(config['seed'] ) lowercase__ : int = int(config['batch_size'] ) lowercase__ : Union[str, Any] = args.model_name_or_path set_seed(_lowerCAmelCase ) lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase ) # Instantiate optimizer lowercase__ : List[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase ) if accelerator.state.deepspeed_plugin is not None: lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: lowercase__ : List[Any] = 1 lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , ) else: lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # We need to keep track of how many total steps we have iterated over lowercase__ : Optional[int] = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ : Tuple = 0 # Now we train the model lowercase__ : Optional[Any] = {} for epoch in range(_lowerCAmelCase , _lowerCAmelCase ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_lowerCAmelCase ): lowercase__ : List[Any] = model(**_lowerCAmelCase ) lowercase__ : Dict = outputs.loss lowercase__ : int = loss / gradient_accumulation_steps accelerator.backward(_lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def a_ ( ): '''simple docstring''' lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , ) parser.add_argument( '--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , ) lowercase__ : Any = parser.parse_args() lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
645
0
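A hypothetical training run for the SentencePieceUnigramTokenizer defined above; the corpus, vocabulary size, and sample sentence are illustrative only, and a real run would need a much larger corpus.

# Assumes the SentencePieceUnigramTokenizer class above is in scope.
corpus = ["The quick brown fox jumps over the lazy dog."] * 200
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(iter(corpus), vocab_size=60, show_progress=False)
print(tokenizer.encode("the quick fox").tokens)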
"""simple docstring""" from __future__ import annotations import math import random from typing import Any class UpperCAmelCase_ : def __init__( self ) -> None: lowercase__ : list[Any] = [] lowercase__ : int = 0 lowercase__ : int = 0 def _UpperCAmelCase ( self ) -> bool: return self.head == self.tail def _UpperCAmelCase ( self , a ) -> None: self.data.append(__a ) lowercase__ : Optional[Any] = self.tail + 1 def _UpperCAmelCase ( self ) -> Any: lowercase__ : Optional[int] = self.data[self.head] lowercase__ : Union[str, Any] = self.head + 1 return ret def _UpperCAmelCase ( self ) -> int: return self.tail - self.head def _UpperCAmelCase ( self ) -> None: print(self.data ) print('**************' ) print(self.data[self.head : self.tail] ) class UpperCAmelCase_ : def __init__( self , a ) -> None: lowercase__ : Tuple = data lowercase__ : MyNode | None = None lowercase__ : MyNode | None = None lowercase__ : int = 1 def _UpperCAmelCase ( self ) -> Any: return self.data def _UpperCAmelCase ( self ) -> MyNode | None: return self.left def _UpperCAmelCase ( self ) -> MyNode | None: return self.right def _UpperCAmelCase ( self ) -> int: return self.height def _UpperCAmelCase ( self , a ) -> None: lowercase__ : Optional[int] = data def _UpperCAmelCase ( self , a ) -> None: lowercase__ : List[str] = node def _UpperCAmelCase ( self , a ) -> None: lowercase__ : Tuple = node def _UpperCAmelCase ( self , a ) -> None: lowercase__ : List[str] = height def a_ ( _lowerCAmelCase : MyNode | None ): '''simple docstring''' if node is None: return 0 return node.get_height() def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ): '''simple docstring''' if a > b: return a return b def a_ ( _lowerCAmelCase : MyNode ): '''simple docstring''' print('left rotation node:' , node.get_data() ) lowercase__ : List[str] = node.get_left() assert ret is not None node.set_left(ret.get_right() ) ret.set_right(_lowerCAmelCase ) lowercase__ : Union[str, Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_lowerCAmelCase ) lowercase__ : Any = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_lowerCAmelCase ) return ret def a_ ( _lowerCAmelCase : MyNode ): '''simple docstring''' print('right rotation node:' , node.get_data() ) lowercase__ : List[str] = node.get_right() assert ret is not None node.set_right(ret.get_left() ) ret.set_left(_lowerCAmelCase ) lowercase__ : Optional[Any] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_lowerCAmelCase ) lowercase__ : Optional[int] = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1 ret.set_height(_lowerCAmelCase ) return ret def a_ ( _lowerCAmelCase : MyNode ): '''simple docstring''' lowercase__ : Dict = node.get_left() assert left_child is not None node.set_left(left_rotation(_lowerCAmelCase ) ) return right_rotation(_lowerCAmelCase ) def a_ ( _lowerCAmelCase : MyNode ): '''simple docstring''' lowercase__ : List[str] = node.get_right() assert right_child is not None node.set_right(right_rotation(_lowerCAmelCase ) ) return left_rotation(_lowerCAmelCase ) def a_ ( _lowerCAmelCase : MyNode | None , _lowerCAmelCase : Any ): '''simple docstring''' if node is None: return MyNode(_lowerCAmelCase ) if data < node.get_data(): node.set_left(insert_node(node.get_left() , _lowerCAmelCase ) ) if ( get_height(node.get_left() ) - get_height(node.get_right() ) == 2 ): # an unbalance detected lowercase__ : Optional[Any] = node.get_left() assert left_child is not 
None if ( data < left_child.get_data() ): # new node is the left child of the left child lowercase__ : Union[str, Any] = right_rotation(_lowerCAmelCase ) else: lowercase__ : Any = lr_rotation(_lowerCAmelCase ) else: node.set_right(insert_node(node.get_right() , _lowerCAmelCase ) ) if get_height(node.get_right() ) - get_height(node.get_left() ) == 2: lowercase__ : Any = node.get_right() assert right_child is not None if data < right_child.get_data(): lowercase__ : List[str] = rl_rotation(_lowerCAmelCase ) else: lowercase__ : Dict = left_rotation(_lowerCAmelCase ) lowercase__ : Optional[int] = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1 node.set_height(_lowerCAmelCase ) return node def a_ ( _lowerCAmelCase : MyNode ): '''simple docstring''' while True: lowercase__ : Optional[int] = root.get_right() if right_child is None: break lowercase__ : Any = right_child return root.get_data() def a_ ( _lowerCAmelCase : MyNode ): '''simple docstring''' while True: lowercase__ : Optional[Any] = root.get_left() if left_child is None: break lowercase__ : Tuple = left_child return root.get_data() def a_ ( _lowerCAmelCase : MyNode , _lowerCAmelCase : Any ): '''simple docstring''' lowercase__ : Tuple = root.get_left() lowercase__ : Union[str, Any] = root.get_right() if root.get_data() == data: if left_child is not None and right_child is not None: lowercase__ : Tuple = get_left_most(_lowerCAmelCase ) root.set_data(_lowerCAmelCase ) root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase ) ) elif left_child is not None: lowercase__ : List[str] = left_child elif right_child is not None: lowercase__ : Dict = right_child else: return None elif root.get_data() > data: if left_child is None: print('No such data' ) return root else: root.set_left(del_node(_lowerCAmelCase , _lowerCAmelCase ) ) else: # root.get_data() < data if right_child is None: return root else: root.set_right(del_node(_lowerCAmelCase , _lowerCAmelCase ) ) if get_height(_lowerCAmelCase ) - get_height(_lowerCAmelCase ) == 2: assert right_child is not None if get_height(right_child.get_right() ) > get_height(right_child.get_left() ): lowercase__ : Optional[Any] = left_rotation(_lowerCAmelCase ) else: lowercase__ : Optional[Any] = rl_rotation(_lowerCAmelCase ) elif get_height(_lowerCAmelCase ) - get_height(_lowerCAmelCase ) == -2: assert left_child is not None if get_height(left_child.get_left() ) > get_height(left_child.get_right() ): lowercase__ : List[Any] = right_rotation(_lowerCAmelCase ) else: lowercase__ : Union[str, Any] = lr_rotation(_lowerCAmelCase ) lowercase__ : str = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1 root.set_height(_lowerCAmelCase ) return root class UpperCAmelCase_ : def __init__( self ) -> None: lowercase__ : MyNode | None = None def _UpperCAmelCase ( self ) -> int: return get_height(self.root ) def _UpperCAmelCase ( self , a ) -> None: print('insert:' + str(__a ) ) lowercase__ : Any = insert_node(self.root , __a ) def _UpperCAmelCase ( self , a ) -> None: print('delete:' + str(__a ) ) if self.root is None: print('Tree is empty!' 
) return lowercase__ : Union[str, Any] = del_node(self.root , __a ) def __str__( self , ) -> str: # a level traversale, gives a more intuitive look on the tree lowercase__ : List[str] = '' lowercase__ : Optional[int] = MyQueue() q.push(self.root ) lowercase__ : Union[str, Any] = self.get_height() if layer == 0: return output lowercase__ : Union[str, Any] = 0 while not q.is_empty(): lowercase__ : Dict = q.pop() lowercase__ : Tuple = ' ' * int(math.pow(2 , layer - 1 ) ) output += space if node is None: output += "*" q.push(__a ) q.push(__a ) else: output += str(node.get_data() ) q.push(node.get_left() ) q.push(node.get_right() ) output += space lowercase__ : Optional[Any] = cnt + 1 for i in range(1_0_0 ): if cnt == math.pow(2 , __a ) - 1: lowercase__ : Dict = layer - 1 if layer == 0: output += "\n*************************************" return output output += "\n" break output += "\n*************************************" return output def a_ ( ): '''simple docstring''' import doctest doctest.testmod() if __name__ == "__main__": _test() _UpperCamelCase : Optional[int] = AVLtree() _UpperCamelCase : Optional[Any] = list(range(10)) random.shuffle(lst) for i in lst: t.insert(i) print(str(t)) random.shuffle(lst) for i in lst: t.del_node(i) print(str(t))
717
"""simple docstring""" def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Any = [0] * len(_lowerCAmelCase ) for i in range(1 , len(_lowerCAmelCase ) ): # use last results for better performance - dynamic programming lowercase__ : List[str] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: lowercase__ : Dict = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 lowercase__ : Union[str, Any] = j return prefix_result def a_ ( _lowerCAmelCase : str ): '''simple docstring''' return max(prefix_function(_lowerCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod()
645
0
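The prefix function above is the core of Knuth-Morris-Pratt matching. As a sketch of how it is typically used, the helper below finds a pattern in a text in linear time by running `prefix_function` over `pattern + sentinel + text`; `kmp_search` is an illustrative name, not part of the file above.

def kmp_search(text: str, pattern: str) -> int:
    # Returns the start index of the first occurrence of pattern, or -1.
    # "\x00" is a sentinel assumed not to occur in either input.
    prefix = prefix_function(pattern + "\x00" + text)
    for i, length in enumerate(prefix):
        if length == len(pattern):
            return i - 2 * len(pattern)
    return -1


assert kmp_search("abcabcd", "abcd") == 3
assert kmp_search("aaaa", "b") == -1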
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel _UpperCamelCase : str = { "text_branch": "text_model", "audio_branch": "audio_model.audio_encoder", "attn": "attention.self", "self.proj": "output.dense", "attention.self_mask": "attn_mask", "mlp.fc1": "intermediate.dense", "mlp.fc2": "output.dense", "norm1": "layernorm_before", "norm2": "layernorm_after", "bn0": "batch_norm", } _UpperCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc") def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any]=False ): '''simple docstring''' lowercase__ , lowercase__ : List[Any] = create_model( 'HTSAT-tiny' , 'roberta' , _lowercase , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=_lowercase , fusion_type='aff_2d' if enable_fusion else None , ) return model, model_cfg def a_ ( _lowerCAmelCase : List[Any] ): '''simple docstring''' lowercase__ : Optional[Any] = {} lowercase__ : Any = R'.*sequential.(\d+).*' lowercase__ : Tuple = R'.*_projection.(\d+).*' for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowercase__ : Tuple = key.replace(_lowercase , _lowercase ) if re.match(_lowercase , _lowercase ): # replace sequential layers with list lowercase__ : Tuple = re.match(_lowercase , _lowercase ).group(1 ) lowercase__ : int = key.replace(f"""sequential.{sequential_layer}.""" , f"""layers.{int(_lowercase )//3}.linear.""" ) elif re.match(_lowercase , _lowercase ): lowercase__ : str = int(re.match(_lowercase , _lowercase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
lowercase__ : List[Any] = 1 if projecton_layer == 0 else 2 lowercase__ : List[str] = key.replace(f"""_projection.{projecton_layer}.""" , f"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value lowercase__ : Optional[Any] = value lowercase__ : str = mixed_qkv.size(0 ) // 3 lowercase__ : Optional[Any] = mixed_qkv[:qkv_dim] lowercase__ : Dict = mixed_qkv[qkv_dim : qkv_dim * 2] lowercase__ : List[Any] = mixed_qkv[qkv_dim * 2 :] lowercase__ : Optional[Any] = query_layer lowercase__ : Union[str, Any] = key_layer lowercase__ : Optional[int] = value_layer else: lowercase__ : str = value return model_state_dict def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple=False ): '''simple docstring''' lowercase__ , lowercase__ : Optional[Any] = init_clap(_lowercase , enable_fusion=_lowercase ) clap_model.eval() lowercase__ : Tuple = clap_model.state_dict() lowercase__ : List[Any] = rename_state_dict(_lowercase ) lowercase__ : Optional[Any] = ClapConfig() lowercase__ : Dict = enable_fusion lowercase__ : List[str] = ClapModel(_lowercase ) # ignore the spectrogram embedding layer model.load_state_dict(_lowercase , strict=_lowercase ) model.save_pretrained(_lowercase ) transformers_config.save_pretrained(_lowercase ) if __name__ == "__main__": _UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not") _UpperCamelCase : str = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
718
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]: lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0} lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} lowercase__ : Optional[int] = parent lowercase__ : Optional[int] = batch_size lowercase__ : str = num_channels lowercase__ : Any = image_size lowercase__ : Optional[Any] = min_resolution lowercase__ : int = max_resolution lowercase__ : List[Any] = do_resize lowercase__ : List[str] = size lowercase__ : str = do_center_crop lowercase__ : List[Any] = crop_size lowercase__ : Union[str, Any] = do_flip_channel_order def _UpperCAmelCase ( self ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Tuple = MobileViTImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_center_crop' ) ) self.assertTrue(hasattr(a , 'center_crop' ) ) self.assertTrue(hasattr(a , 'do_flip_channel_order' ) ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 2_0} ) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} ) lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> str: # Initialize image_processing lowercase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Tuple: # Initialize image_processing lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Dict: # Initialize image_processing lowercase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
645
0
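The CLAP conversion script above splits a fused qkv projection into separate query, key, and value tensors by slicing along the first dimension. A minimal, runnable sketch of that slicing, with an illustrative hidden size:

import torch

hidden = 8
fused_qkv = torch.randn(3 * hidden, hidden)  # rows stacked as [q; k; v]

qkv_dim = fused_qkv.size(0) // 3
query = fused_qkv[:qkv_dim]
key = fused_qkv[qkv_dim : qkv_dim * 2]
value = fused_qkv[qkv_dim * 2 :]

assert query.shape == key.shape == value.shape == (hidden, hidden)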
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase : Optional[int] = { "configuration_blenderbot_small": [ "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotSmallConfig", "BlenderbotSmallOnnxConfig", ], "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Union[str, Any] = ["BlenderbotSmallTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : List[Any] = [ "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotSmallForCausalLM", "BlenderbotSmallForConditionalGeneration", "BlenderbotSmallModel", "BlenderbotSmallPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Optional[int] = [ "TFBlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallModel", "TFBlenderbotSmallPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : List[Any] = [ "FlaxBlenderbotSmallForConditionalGeneration", "FlaxBlenderbotSmallModel", "FlaxBlenderbotSmallPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys _UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
719
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict: lowercase__ : Optional[Any] = parent lowercase__ : Dict = batch_size lowercase__ : List[Any] = seq_length lowercase__ : int = is_training lowercase__ : str = use_attention_mask lowercase__ : Dict = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : Tuple = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : int = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Dict = hidden_dropout_prob lowercase__ : Tuple = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : int = type_vocab_size lowercase__ : List[str] = type_sequence_label_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : Optional[int] = num_choices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_attention_mask: lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : List[str] = None if self.use_token_type_ids: lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : Any = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCAmelCase ( self ) -> Any: lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self ) @slow def _UpperCAmelCase ( self ) -> str: for model_class_name in self.all_model_classes: lowercase__ : str = 
model_class_name.from_pretrained('albert-base-v2' ) lowercase__ : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(a ) @require_flax class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' ) lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase__ : Any = model(a , attention_mask=a )[0] lowercase__ : Tuple = (1, 1_1, 7_6_8) self.assertEqual(output.shape , a ) lowercase__ : Optional[Any] = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
645
0
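The blenderbot-small `__init__` above defers all heavy imports through `_LazyModule`. The module below sketches the underlying PEP 562 mechanism with a module-level `__getattr__`; the mapping is an illustrative assumption, far simpler than the transformers implementation.

# Save as a module (e.g. lazy_sketch.py); attribute access triggers the import.
import importlib

_import_structure = {"json": ["dumps"], "math": ["sqrt"]}


def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")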
"""simple docstring""" from itertools import permutations def a_ ( _lowerCAmelCase : tuple ): '''simple docstring''' if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False lowercase__ : Tuple = [7, 11, 13, 17] for i, test in enumerate(lowercase_ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def a_ ( _lowerCAmelCase : int = 10 ): '''simple docstring''' return sum( int(''.join(map(lowercase_ , lowercase_ ) ) ) for num in permutations(range(lowercase_ ) ) if is_substring_divisible(lowercase_ ) ) if __name__ == "__main__": print(f'''{solution() = }''')
720
"""simple docstring""" from collections.abc import Sequence def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) ) def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' lowercase__ : int = 0.0 for coeff in reversed(_lowerCAmelCase ): lowercase__ : List[Any] = result * x + coeff return result if __name__ == "__main__": _UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0) _UpperCamelCase : Dict = 1_0.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
645
0
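Both functions above compute the same value; Horner's method simply re-associates the sum as (((c_n * x + c_{n-1}) * x + ...) * x + c_0). A quick randomized agreement check, assuming both functions are in scope:

import math
import random

poly = tuple(random.uniform(-5, 5) for _ in range(6))
x = random.uniform(-10, 10)
assert math.isclose(evaluate_poly(poly, x), horner(poly, x), rel_tol=1e-9)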
"""simple docstring""" class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE): pass class UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE): pass class UpperCAmelCase_ : def __init__( self ) -> Tuple: lowercase__ : List[Any] = [ [], [], [], ] def _UpperCAmelCase ( self , a , a ) -> Dict: try: if len(self.queues[priority] ) >= 1_0_0: raise OverflowError('Maximum queue size is 100' ) self.queues[priority].append(_a ) except IndexError: raise ValueError('Valid priorities are 0, 1, and 2' ) def _UpperCAmelCase ( self ) -> str: for queue in self.queues: if queue: return queue.pop(0 ) raise UnderFlowError('All queues are empty' ) def __str__( self ) -> str: return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) ) class UpperCAmelCase_ : def __init__( self ) -> Tuple: lowercase__ : Union[str, Any] = [] def _UpperCAmelCase ( self , a ) -> Union[str, Any]: if len(self.queue ) == 1_0_0: raise OverFlowError('Maximum queue size is 100' ) self.queue.append(_a ) def _UpperCAmelCase ( self ) -> Any: if not self.queue: raise UnderFlowError('The queue is empty' ) else: lowercase__ : List[Any] = min(self.queue ) self.queue.remove(_a ) return data def __str__( self ) -> Dict: return str(self.queue ) def a_ ( ): '''simple docstring''' lowercase__ : Any = FixedPriorityQueue() fpq.enqueue(0 , 10 ) fpq.enqueue(1 , 70 ) fpq.enqueue(0 , 100 ) fpq.enqueue(2 , 1 ) fpq.enqueue(2 , 5 ) fpq.enqueue(1 , 7 ) fpq.enqueue(2 , 4 ) fpq.enqueue(1 , 64 ) fpq.enqueue(0 , 128 ) print(lowerCAmelCase__ ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(lowerCAmelCase__ ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) print(fpq.dequeue() ) def a_ ( ): '''simple docstring''' lowercase__ : str = ElementPriorityQueue() epq.enqueue(10 ) epq.enqueue(70 ) epq.enqueue(100 ) epq.enqueue(1 ) epq.enqueue(5 ) epq.enqueue(7 ) epq.enqueue(4 ) epq.enqueue(64 ) epq.enqueue(128 ) print(lowerCAmelCase__ ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(lowerCAmelCase__ ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) print(epq.dequeue() ) if __name__ == "__main__": fixed_priority_queue() element_priority_queue()
721
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _UpperCamelCase : Any = [ {"dataset": "wikipedia", "config_name": "20220301.de"}, {"dataset": "wikipedia", "config_name": "20220301.en"}, {"dataset": "wikipedia", "config_name": "20220301.fr"}, {"dataset": "wikipedia", "config_name": "20220301.frr"}, {"dataset": "wikipedia", "config_name": "20220301.it"}, {"dataset": "wikipedia", "config_name": "20220301.simple"}, {"dataset": "snli", "config_name": "plain_text"}, {"dataset": "eli5", "config_name": "LFQA_reddit"}, {"dataset": "wiki40b", "config_name": "en"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, {"dataset": "natural_questions", "config_name": "default"}, ] def a_ ( _lowerCAmelCase : Optional[Any]=True ): '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a)) class UpperCAmelCase_ ( _a): lowerCamelCase__ : str = None lowerCamelCase__ : Optional[Any] = None def _UpperCAmelCase ( self , a , a ) -> List[Any]: with TemporaryDirectory() as tmp_dir: lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a ) lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=a , config_name=a , hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a ) self.assertTrue(os.path.exists(a ) ) @pytest.mark.integration def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : Optional[int] = import_main_class(dataset_module.module_path ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam lowercase__ : Optional[int] = None builder_instance.download_and_prepare() lowercase__ : Optional[int] = builder_instance.as_dataset() assert ds @pytest.mark.integration def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , 
hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset() assert ds assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert "train" in ds assert isinstance(ds['train'] , _lowerCAmelCase ) assert next(iter(ds['train'] ) )
645
0
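The hand-rolled FixedPriorityQueue above can also be expressed with the standard library's heapq, using an insertion counter so that equal priorities dequeue FIFO. This is an alternative sketch, not the implementation above:

import heapq
from itertools import count


class HeapPriorityQueue:
    def __init__(self):
        self._heap = []
        self._order = count()  # tie-breaker keeps FIFO order within a priority

    def enqueue(self, priority, data):
        heapq.heappush(self._heap, (priority, next(self._order), data))

    def dequeue(self):
        if not self._heap:
            raise IndexError("All queues are empty")
        return heapq.heappop(self._heap)[-1]


q = HeapPriorityQueue()
for priority, item in [(0, 10), (1, 70), (0, 100), (2, 1)]:
    q.enqueue(priority, item)
assert [q.dequeue() for _ in range(4)] == [10, 100, 70, 1]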
"""simple docstring""" import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Union[str, Any] = LayoutLMTokenizer lowerCamelCase__ : Tuple = LayoutLMTokenizerFast lowerCamelCase__ : Union[str, Any] = True lowerCamelCase__ : Optional[Any] = True def _UpperCAmelCase ( self ) -> Any: super().setUp() lowercase__ : List[str] = [ '[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowercase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def _UpperCAmelCase ( self , **a ) -> Optional[int]: return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _UpperCAmelCase ( self , a ) -> Union[str, Any]: lowercase__ : Optional[Any] = 'UNwant\u00E9d,running' lowercase__ : List[Any] = 'unwanted, running' return input_text, output_text def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Tuple = self.tokenizer_class(self.vocab_file ) lowercase__ : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(UpperCamelCase__ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 1_0, 8, 9] ) def _UpperCAmelCase ( self ) -> List[Any]: pass
700
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def a_ ( _lowerCAmelCase : dict ): '''simple docstring''' return (data["data"], data["target"]) def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ): '''simple docstring''' lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(_lowerCAmelCase , _lowerCAmelCase ) # Predict target for test data lowercase__ : str = xgb.predict(_lowerCAmelCase ) lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 ) return predictions def a_ ( ): '''simple docstring''' lowercase__ : Optional[Any] = fetch_california_housing() lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split( _lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 ) lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Error printing print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
645
0
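A useful sanity check for the regressor above is whether it beats a trivial baseline that always predicts the training mean. A sketch on synthetic data; the distribution and seed are illustrative assumptions:

import numpy as np
from sklearn.metrics import mean_absolute_error

rng = np.random.default_rng(42)
y_train = rng.normal(2.0, 1.0, size=100)  # synthetic targets for illustration
y_test = rng.normal(2.0, 1.0, size=25)
baseline = np.full_like(y_test, y_train.mean())
print("baseline MAE:", mean_absolute_error(y_test, baseline))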
"""simple docstring""" import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig _UpperCamelCase : Any = { """facebook/maskformer-swin-base-ade""": ( """https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json""" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } _UpperCamelCase : Optional[int] = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case_): lowerCamelCase__ : Dict = """maskformer""" lowerCamelCase__ : int = {"""hidden_size""": """mask_feature_size"""} lowerCamelCase__ : str = ["""resnet""", """swin"""] lowerCamelCase__ : List[str] = ["""detr"""] def __init__( self , a = 2_5_6 , a = 2_5_6 , a = 0.1 , a = False , a = None , a = None , a = 0.02 , a = 1.0 , a = 1.0 , a = 1.0 , a = 20.0 , a = None , **a , ) -> Optional[int]: if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k lowercase__ : Optional[int] = SwinConfig( image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , ) if isinstance(a , a ): lowercase__ : str = backbone_config.pop('model_type' ) lowercase__ : int = CONFIG_MAPPING[backbone_model_type] lowercase__ : str = config_class.from_dict(a ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """ f"""Supported model types: {",".join(self.backbones_supported )}""" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 lowercase__ : Optional[int] = DetrConfig() else: # verify that the decoder is supported lowercase__ : Optional[int] = ( decoder_config.pop('model_type' ) if isinstance(a , a ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f"""Transformer Decoder {decoder_type} not supported, please use one of""" f""" {",".join(self.decoders_supported )}""" ) if isinstance(a , a ): lowercase__ : str = CONFIG_MAPPING[decoder_type] lowercase__ : List[Any] = config_class.from_dict(a ) lowercase__ : Tuple = backbone_config lowercase__ : int = decoder_config # main feature dimension for the model lowercase__ : List[str] = fpn_feature_size lowercase__ : int = mask_feature_size # initializer lowercase__ : int = init_std lowercase__ : Dict = init_xavier_std # Hungarian matcher && loss lowercase__ : int = cross_entropy_weight lowercase__ : Tuple = dice_weight lowercase__ : Dict = mask_weight lowercase__ : int = use_auxiliary_loss lowercase__ : Tuple = no_object_weight lowercase__ : List[Any] = output_auxiliary_logits lowercase__ : Optional[int] = self.decoder_config.encoder_attention_heads lowercase__ : Tuple = self.decoder_config.num_hidden_layers super().__init__(**a ) @classmethod def _UpperCAmelCase ( cls , a , a , **a ) -> List[Any]: return cls( backbone_config=a , decoder_config=a , **a , ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : int = copy.deepcopy(self.__dict__ ) lowercase__ : Optional[int] = self.backbone_config.to_dict() lowercase__ : List[Any] = self.decoder_config.to_dict() lowercase__ : str = self.__class__.model_type return output
701
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]: lowercase__ : str = parent lowercase__ : int = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Dict = patch_size lowercase__ : Tuple = tubelet_size lowercase__ : Optional[int] = num_frames lowercase__ : Optional[int] = is_training lowercase__ : int = use_labels lowercase__ : Optional[int] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : Any = intermediate_size lowercase__ : str = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Union[str, Any] = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = mask_ratio lowercase__ : Optional[Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase__ : Optional[Any] = (image_size // patch_size) ** 2 lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase__ : str = int(mask_ratio * self.seq_length ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase__ : int = None if self.use_labels: lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Dict = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Tuple: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : Dict = VideoMAEModel(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( 
self , a , a , a ) -> Union[str, Any]: lowercase__ : str = VideoMAEForPreTraining(a ) model.to(a ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Any = torch.ones((self.num_masks,) ) lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool() lowercase__ : str = model(a , a ) # model only returns predictions for masked patches lowercase__ : str = mask.sum().item() lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowerCamelCase__ : Optional[int] = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) lowerCamelCase__ : Any = False lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : str = False def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = VideoMAEModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]: lowercase__ : Union[str, Any] = copy.deepcopy(a ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) ) lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase__ : Union[str, Any] = bool_masked_pos.to(a ) if return_labels: if model_class in [ *get_values(a ), ]: lowercase__ : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a ) return inputs_dict def _UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) lowercase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : int = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a ) self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> Optional[Any]: if not self.has_attentions: pass else: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = True for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Any = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase__ : Optional[Any] = True lowercase__ : int = False lowercase__ : Any = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Dict = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : List[str] = len(a ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : List[str] = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) ) self.assertEqual(out_len + 1 , len(a ) ) lowercase__ : int = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> Optional[int]: def check_hidden_states_output(a , a , a ): lowercase__ : Optional[int] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(a ) , a ) lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Union[str, Any] = True 
check_hidden_states_output(a , a , a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> List[Any]: pass def a_ ( ): '''simple docstring''' lowercase__ : int = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) lowercase__ : str = np.load(_lowerCAmelCase ) return list(_lowerCAmelCase ) @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Optional[Any]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to( a ) lowercase__ : str = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : Union[str, Any] = model(**a ) # verify the logits lowercase__ : str = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a ) lowercase__ : Optional[Any] = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a ) # add boolean mask, indicating which patches to mask lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) lowercase__ : str = torch.load(a ) # forward pass with torch.no_grad(): lowercase__ : List[Any] = model(**a ) # verify the logits lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] ) lowercase__ : List[str] = torch.tensor( [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a ) self.assertEqual(outputs.logits.shape , a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to( a ) with torch.no_grad(): lowercase__ : Any = model(**a ) lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
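# Illustrative sketch (values chosen here, not taken from the test suite above):
# the VideoMAE pre-training mask is one shared 1/0 vector of length
# `seq_length`, with `num_masks` ones, broadcast over the batch.
import torch

num_masks, seq_length, batch_size = 2, 6, 4
mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()
assert bool_masked_pos.shape == (batch_size, seq_length)  # True marks masked patches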
style_context_codestyle: 645
label: 0
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : int = MobileBertConfig.from_json_file(SCREAMING_SNAKE_CASE_ ) print(f"""Building PyTorch model from configuration: {config}""" ) lowercase__ : List[str] = MobileBertForPreTraining(SCREAMING_SNAKE_CASE_ ) # Load weights from tf checkpoint lowercase__ : Dict = load_tf_weights_in_mobilebert(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ ) if __name__ == "__main__": _UpperCamelCase : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--mobilebert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained MobileBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _UpperCamelCase : int = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
code_codestyle: 702
"""simple docstring""" import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } _UpperCamelCase : List[str] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): '''simple docstring''' for attribute in key.split('.' ): lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: lowercase__ : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase__ : Optional[Any] = value elif weight_type == "weight_g": lowercase__ : Dict = value elif weight_type == "weight_v": lowercase__ : List[str] = value elif weight_type == "bias": lowercase__ : Optional[Any] = value else: lowercase__ : List[str] = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Tuple = [] lowercase__ : List[str] = fairseq_model.state_dict() lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) lowercase__ : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue lowercase__ : int = True if "*" in mapped_key: lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' 
)[-2] lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase ) if "weight_g" in name: lowercase__ : List[Any] = 'weight_g' elif "weight_v" in name: lowercase__ : int = 'weight_v' elif "bias" in name: lowercase__ : Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ : Union[str, Any] = 'weight' else: lowercase__ : int = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : int = full_name.split('conv_layers.' )[-1] lowercase__ : int = name.split('.' ) lowercase__ : int = int(items[0] ) lowercase__ : Dict = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase__ : Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase__ : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) lowercase__ : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase__ : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ): '''simple docstring''' if config_path is not None: lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase ) else: lowercase__ : Any = UniSpeechSatConfig() lowercase__ : Union[str, Any] = '' if is_finetuned: lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase ) else: lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) lowercase__ : Union[str, Any] = model[0].eval() recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) if 
__name__ == "__main__": _UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCamelCase : str = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
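# Example invocation of the UniSpeechSat converter above; the script filename
# and all paths are placeholders, and the flags are the ones declared by the
# argparse block:
# python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --config_path /path/to/config.json \
#     --pytorch_dump_folder_path /path/to/output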
style_context_codestyle: 645
label: 0
"""simple docstring""" import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class UpperCAmelCase_ ( _a): lowerCamelCase__ : Optional[Any] = ["image_processor", "tokenizer"] lowerCamelCase__ : str = "LayoutLMv3ImageProcessor" lowerCamelCase__ : List[str] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self , a=None , a=None , **a ) -> Optional[int]: lowercase__ : List[str] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , a , ) lowercase__ : int = kwargs.pop('feature_extractor' ) lowercase__ : Dict = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(a , a ) def __call__( self , a , a = None , a = None , a = None , a = None , a = True , a = False , a = None , a = None , a = 0 , a = None , a = None , a = None , a = False , a = False , a = False , a = False , a = True , a = None , **a , ) -> List[Any]: if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( 'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( 'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' ) # first, apply the image processor lowercase__ : List[str] = self.image_processor(images=a , return_tensors=a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(a , a ): lowercase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) lowercase__ : List[str] = features['words'] lowercase__ : List[Any] = self.tokenizer( text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , ) # add pixel values lowercase__ : Tuple = features.pop('pixel_values' ) if return_overflowing_tokens is True: lowercase__ : List[str] = self.get_overflowing_images(a , encoded_inputs['overflow_to_sample_mapping'] ) lowercase__ : List[str] = images return encoded_inputs def _UpperCAmelCase ( self , a , a ) -> Optional[Any]: lowercase__ : int = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(a ) != len(a ): raise ValueError( 'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got' f""" {len(a )} and {len(a )}""" ) return images_with_overflow def _UpperCAmelCase ( self , *a , **a ) -> Dict: return self.tokenizer.batch_decode(*a , **a ) def _UpperCAmelCase ( self , *a , **a ) -> Union[str, Any]: return self.tokenizer.decode(*a , **a ) @property def _UpperCAmelCase ( self ) -> int: return ["input_ids", 
"bbox", "attention_mask", "pixel_values"] @property def _UpperCAmelCase ( self ) -> List[str]: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , ) return self.image_processor_class @property def _UpperCAmelCase ( self ) -> Dict: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , ) return self.image_processor
code_codestyle: 703
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int: lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : Dict = image_size lowercase__ : str = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : List[str] = embed_dim lowercase__ : Any = depths lowercase__ : Dict = num_heads lowercase__ : List[str] = window_size lowercase__ : int = mlp_ratio lowercase__ : Tuple = qkv_bias lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Tuple = drop_path_rate lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = use_absolute_embeddings lowercase__ : Optional[Any] = patch_norm lowercase__ : Any = layer_norm_eps lowercase__ : List[Any] = initializer_range lowercase__ : List[str] = is_training lowercase__ : int = scope lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = type_sequence_label_size lowercase__ : List[str] = encoder_stride lowercase__ : Optional[Any] = out_features lowercase__ : Dict = out_indices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _UpperCAmelCase ( self , a , a , a ) -> Dict: lowercase__ : Tuple = MaskFormerSwinModel(config=a ) model.to(a ) model.eval() lowercase__ : str = model(a ) lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a ) model.to(a ) model.eval() lowercase__ : int = model(a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(a ): lowercase__ : Dict = ['stem'] lowercase__ : List[str] = MaskFormerSwinBackbone(config=a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase__ : str = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Any = False lowerCamelCase__ : Dict = False lowerCamelCase__ : int = False def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = MaskFormerSwinModelTester(self ) lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def _UpperCAmelCase ( self ) -> Optional[int]: pass def _UpperCAmelCase ( self ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCAmelCase ( self ) -> str: return def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a ) @unittest.skip('Swin does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: pass @unittest.skip('Swin does not support feedforward chunking' ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> str: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) lowercase__ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => 
so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def _UpperCAmelCase ( self ) -> List[Any]: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def _UpperCAmelCase ( self ) -> int: pass def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple: lowercase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[Any] = outputs.hidden_states lowercase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swin has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any] = 3 lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : int = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass def _UpperCAmelCase ( self ) -> Any: lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(a ): lowercase__ : Union[str, Any] = 0 return t def check_equivalence(a , a , a , a={} ): 
with torch.no_grad(): lowercase__ : Optional[Any] = model(**a , return_dict=a , **a ) lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple() def recursive_check(a , a ): if isinstance(a , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a , a ): recursive_check(a , a ) elif isinstance(a , a ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(a , a ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has""" f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}.""" ) , ) recursive_check(a , a ) for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) model.to(a ) model.eval() lowercase__ : Tuple = self._prepare_for_class(a , a ) lowercase__ : Optional[Any] = self._prepare_for_class(a , a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a ) lowercase__ : int = self._prepare_for_class(a , a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ ( unittest.TestCase , _a): lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Optional[int] = MaskFormerSwinModelTester(self ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: lowercase__ : Optional[Any] = backbone_class(a ) backbone.to(a ) backbone.eval() lowercase__ : Union[str, Any] = backbone(**a ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , a ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowercase__ : List[str] = backbone(**a , output_hidden_states=a ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowercase__ : List[Any] = backbone(**a , output_attentions=a ) self.assertIsNotNone(outputs.attentions )
style_context_codestyle: 645
label: 0
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers _UpperCamelCase : List[str] = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)] def a_ ( ): '''simple docstring''' lowercase__ : str = os.path.dirname(os.path.realpath(__lowercase ) ) lowercase__ : Tuple = os.path.join(__lowercase , 'words.txt' ) lowercase__ : str = '' with open(__lowercase ) as f: lowercase__ : Optional[Any] = f.readline() lowercase__ : Tuple = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )] lowercase__ : Optional[Any] = [ word for word in [sum(ord(__lowercase ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(__lowercase ) if __name__ == "__main__": print(solution())
code_codestyle: 704
"""simple docstring""" import math def a_ ( _lowerCAmelCase : int = 100 ): '''simple docstring''' lowercase__ : Union[str, Any] = sum(i * i for i in range(1 , n + 1 ) ) lowercase__ : str = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f'''{solution() = }''')
style_context_codestyle: 645
label: 0
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def a_ ( _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' lowercase__ , lowercase__ : str = image.size lowercase__ , lowercase__ : Any = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 lowercase__ : str = image.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) lowercase__ : List[Any] = np.array(__snake_case ).astype(np.floataa ) / 2_5_5.0 lowercase__ : Any = image[None].transpose(0 , 3 , 1 , 2 ) lowercase__ : Dict = torch.from_numpy(__snake_case ) return 2.0 * image - 1.0 class UpperCAmelCase_ ( __lowercase): def __init__( self , a , a , a , ) -> int: super().__init__() self.register_modules(vqvae=_A , unet=_A , scheduler=_A ) @torch.no_grad() def __call__( self , a = None , a = 1 , a = 1_0_0 , a = 0.0 , a = None , a = "pil" , a = True , ) -> Dict: if isinstance(_A , PIL.Image.Image ): lowercase__ : Tuple = 1 elif isinstance(_A , torch.Tensor ): lowercase__ : Optional[Any] = image.shape[0] else: raise ValueError(f"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_A )}""" ) if isinstance(_A , PIL.Image.Image ): lowercase__ : Union[str, Any] = preprocess(_A ) lowercase__ , lowercase__ : List[Any] = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image lowercase__ : Optional[int] = (batch_size, self.unet.config.in_channels // 2, height, width) lowercase__ : Optional[int] = next(self.unet.parameters() ).dtype lowercase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A ) lowercase__ : Any = image.to(device=self.device , dtype=_A ) # set timesteps and move to the correct device self.scheduler.set_timesteps(_A , device=self.device ) lowercase__ : str = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler lowercase__ : Union[str, Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowercase__ : int = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowercase__ : List[str] = {} if accepts_eta: lowercase__ : Optional[int] = eta for t in self.progress_bar(_A ): # concat latents and low resolution image in the channel dimension. 
lowercase__ : Optional[int] = torch.cat([latents, image] , dim=1 ) lowercase__ : str = self.scheduler.scale_model_input(_A , _A ) # predict the noise residual lowercase__ : Union[str, Any] = self.unet(_A , _A ).sample # compute the previous noisy sample x_t -> x_t-1 lowercase__ : Optional[int] = self.scheduler.step(_A , _A , _A , **_A ).prev_sample # decode the image latents with the VQVAE lowercase__ : int = self.vqvae.decode(_A ).sample lowercase__ : Optional[Any] = torch.clamp(_A , -1.0 , 1.0 ) lowercase__ : Optional[int] = image / 2 + 0.5 lowercase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowercase__ : Any = self.numpy_to_pil(_A ) if not return_dict: return (image,) return ImagePipelineOutput(images=_A )
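# Usage sketch for the pipeline above (upstream name `LDMSuperResolutionPipeline`);
# the checkpoint id is an assumption for illustration:
# from diffusers import LDMSuperResolutionPipeline
# pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# upscaled = pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]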
code_codestyle: 705
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() def _UpperCAmelCase ( self ) -> Tuple: lowercase__ , lowercase__ : str = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : List[Any] = controlnet_params lowercase__ : int = 'bird' lowercase__ : List[Any] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples ) lowercase__ : List[Any] = jax.random.PRNGKey(0 ) lowercase__ : Tuple = jax.random.split(a , jax.device_count() ) lowercase__ : str = replicate(a ) lowercase__ : List[str] = shard(a ) lowercase__ : Dict = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) lowercase__ : Tuple = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : int = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : Optional[Any] = jnp.array( [0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2 def _UpperCAmelCase ( self ) -> List[str]: lowercase__ , lowercase__ : int = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' , from_pt=a , dtype=jnp.bfloataa ) lowercase__ , lowercase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=a , from_pt=a , dtype=jnp.bfloataa ) lowercase__ : Optional[Any] = controlnet_params lowercase__ : List[Any] = 'Chef in the kitchen' lowercase__ : List[str] = jax.device_count() lowercase__ : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) lowercase__ : Optional[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) lowercase__ : Optional[int] = pipe.prepare_image_inputs([pose_image] * num_samples ) lowercase__ : List[str] = jax.random.PRNGKey(0 ) lowercase__ : str = jax.random.split(a , jax.device_count() ) lowercase__ : Optional[Any] = replicate(a ) lowercase__ : Optional[Any] = shard(a ) lowercase__ : List[Any] = shard(a ) lowercase__ : List[Any] = pipe( prompt_ids=a , image=a , params=a , prng_seed=a , num_inference_steps=5_0 , jit=a , ).images assert images.shape == (jax.device_count(), 1, 7_6_8, 5_1_2, 3) lowercase__ : Union[str, Any] = images.reshape((images.shape[0] * images.shape[1],) + 
images.shape[-3:] ) lowercase__ : List[str] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] lowercase__ : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) lowercase__ : str = jnp.array( [[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] ) print(f"""output_slice: {output_slice}""" ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
style_context_codestyle: 645
label: 0
"""simple docstring""" from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class UpperCAmelCase_ : lowerCamelCase__ : str = LEDConfig lowerCamelCase__ : Dict = {} lowerCamelCase__ : Dict = '''gelu''' def __init__( self , a , a=1_3 , a=7 , a=True , a=False , a=9_9 , a=3_2 , a=2 , a=4 , a=3_7 , a=0.1 , a=0.1 , a=2_0 , a=2 , a=1 , a=0 , a=4 , ) -> Optional[Any]: lowercase__ : Dict = parent lowercase__ : List[str] = batch_size lowercase__ : Dict = seq_length lowercase__ : Tuple = is_training lowercase__ : Optional[int] = use_labels lowercase__ : Union[str, Any] = vocab_size lowercase__ : int = hidden_size lowercase__ : str = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : str = intermediate_size lowercase__ : Tuple = hidden_dropout_prob lowercase__ : int = attention_probs_dropout_prob lowercase__ : Any = max_position_embeddings lowercase__ : int = eos_token_id lowercase__ : Any = pad_token_id lowercase__ : str = bos_token_id lowercase__ : Dict = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after lowercase__ : List[str] = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. 
Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests lowercase__ : Optional[Any] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase__ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__ : List[Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) lowercase__ : Dict = prepare_led_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) lowercase__ : List[str] = tf.concat( [tf.zeros_like(snake_case__ )[:, :-1], tf.ones_like(snake_case__ )[:, -1:]] , axis=-1 , ) lowercase__ : int = global_attention_mask return config, inputs_dict def _UpperCAmelCase ( self , a , a ) -> Optional[int]: lowercase__ : List[str] = TFLEDModel(config=snake_case__ ).get_decoder() lowercase__ : Tuple = inputs_dict['input_ids'] lowercase__ : List[Any] = input_ids[:1, :] lowercase__ : List[Any] = inputs_dict['attention_mask'][:1, :] lowercase__ : Union[str, Any] = 1 # first forward pass lowercase__ : str = model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ ) lowercase__ , lowercase__ : str = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowercase__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowercase__ : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) lowercase__ : int = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowercase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ )[0] lowercase__ : Any = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowercase__ : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowercase__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx] lowercase__ : int = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 ) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : int=None , ): '''simple docstring''' if attention_mask is None: lowercase__ : Tuple = tf.cast(tf.math.not_equal(lowerCAmelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowercase__ : str = tf.concat( [ 
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowercase__ : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowercase__ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class UpperCAmelCase_ ( __a , __a , unittest.TestCase): lowerCamelCase__ : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () lowerCamelCase__ : int = (TFLEDForConditionalGeneration,) if is_tf_available() else () lowerCamelCase__ : List[Any] = ( { '''conversational''': TFLEDForConditionalGeneration, '''feature-extraction''': TFLEDModel, '''summarization''': TFLEDForConditionalGeneration, '''text2text-generation''': TFLEDForConditionalGeneration, '''translation''': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) lowerCamelCase__ : Any = True lowerCamelCase__ : Dict = False lowerCamelCase__ : int = False lowerCamelCase__ : Optional[int] = False def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Optional[Any] = TFLEDModelTester(self ) lowercase__ : Any = ConfigTester(self , config_class=snake_case__ ) def _UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> str: lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = tf.zeros_like(inputs_dict['attention_mask'] ) lowercase__ : List[str] = 2 lowercase__ : str = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , ) lowercase__ : Union[str, Any] = True lowercase__ : Union[str, Any] = self.model_tester.seq_length lowercase__ : Dict = self.model_tester.encoder_seq_length def check_decoder_attentions_output(a ): lowercase__ : int = outputs.decoder_attentions self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(a ): lowercase__ : Dict = [t.numpy() for t in outputs.encoder_attentions] lowercase__ : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(snake_case__ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: lowercase__ : Optional[Any] = True lowercase__ : Union[str, Any] = False lowercase__ : Optional[Any] = False lowercase__ : int = model_class(snake_case__ ) lowercase__ : Union[str, Any] = model(self._prepare_for_class(snake_case__ , snake_case__ ) ) lowercase__ : Any = len(snake_case__ ) 
self.assertEqual(config.output_hidden_states , snake_case__ ) check_encoder_attentions_output(snake_case__ ) if self.is_encoder_decoder: lowercase__ : int = model_class(snake_case__ ) lowercase__ : List[str] = model(self._prepare_for_class(snake_case__ , snake_case__ ) ) self.assertEqual(config.output_hidden_states , snake_case__ ) check_decoder_attentions_output(snake_case__ ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowercase__ : Dict = True lowercase__ : Tuple = model_class(snake_case__ ) lowercase__ : Any = model(self._prepare_for_class(snake_case__ , snake_case__ ) ) self.assertEqual(config.output_hidden_states , snake_case__ ) check_encoder_attentions_output(snake_case__ ) # Check attention is always last and order is fine lowercase__ : List[Any] = True lowercase__ : Union[str, Any] = True lowercase__ : Optional[Any] = model_class(snake_case__ ) lowercase__ : int = model(self._prepare_for_class(snake_case__ , snake_case__ ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case__ ) ) self.assertEqual(model.config.output_hidden_states , snake_case__ ) check_encoder_attentions_output(snake_case__ ) @unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' ) def _UpperCAmelCase ( self ) -> List[str]: pass def _UpperCAmelCase ( self ) -> List[Any]: # TODO: Head-masking not yet implement pass def a_ ( _lowerCAmelCase : Dict ): '''simple docstring''' return tf.constant(lowerCAmelCase__ , dtype=tf.intaa ) _UpperCamelCase : Union[str, Any] = 1e-4 @slow @require_tf class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> Dict: lowercase__ : List[Any] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led # change to intended input here lowercase__ : str = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowercase__ : str = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowercase__ : List[Any] = prepare_led_inputs_dict(model.config , snake_case__ , snake_case__ ) lowercase__ : Any = model(**snake_case__ )[0] lowercase__ : List[Any] = (1, 1_0_2_4, 7_6_8) self.assertEqual(output.shape , snake_case__ ) # change to expected output here lowercase__ : Dict = tf.convert_to_tensor( [[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , ) tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-3 ) def _UpperCAmelCase ( self ) -> str: lowercase__ : List[Any] = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ) # change to intended input here lowercase__ : Dict = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowercase__ : str = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] ) lowercase__ : Dict = prepare_led_inputs_dict(model.config , snake_case__ , snake_case__ ) lowercase__ : Dict = model(**snake_case__ )[0] lowercase__ : Union[str, Any] = (1, 1_0_2_4, model.config.vocab_size) self.assertEqual(output.shape , snake_case__ ) # change to expected output here lowercase__ : str = tf.convert_to_tensor( [[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , ) tf.debugging.assert_near(output[:, :3, :3] , snake_case__ , atol=1e-3 , rtol=1e-3 )
code_codestyle: 706
"""simple docstring""" from .glue import GlueDataset, GlueDataTrainingArguments from .language_modeling import ( LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, TextDataset, TextDatasetForNextSentencePrediction, ) from .squad import SquadDataset, SquadDataTrainingArguments
style_context_codestyle: 645
label: 0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _UpperCamelCase : Any = logging.get_logger(__name__) _UpperCamelCase : Optional[int] = { "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json", # See all CANINE models at https://huggingface.co/models?filter=canine } class UpperCAmelCase_ ( __lowerCamelCase): lowerCamelCase__ : Dict = '''canine''' def __init__( self , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=1_6_3_8_4 , a=1_6 , a=0.02 , a=1e-12 , a=0 , a=0XE_000 , a=0XE_001 , a=4 , a=4 , a=8 , a=1_6_3_8_4 , a=1_2_8 , **a , ) -> Optional[int]: super().__init__(pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , **a_ ) lowercase__ : Dict = max_position_embeddings lowercase__ : List[Any] = hidden_size lowercase__ : str = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : Tuple = hidden_act lowercase__ : Any = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : List[Any] = initializer_range lowercase__ : Union[str, Any] = type_vocab_size lowercase__ : List[str] = layer_norm_eps # Character config: lowercase__ : List[Any] = downsampling_rate lowercase__ : Union[str, Any] = upsampling_kernel_size lowercase__ : Optional[Any] = num_hash_functions lowercase__ : str = num_hash_buckets lowercase__ : int = local_transformer_stride
code_codestyle: 707
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) lowercase__ : Union[str, Any] = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip' ) model.to(a ) from datasets import load_dataset lowercase__ : str = load_dataset('nielsr/rvlcdip-demo' ) lowercase__ : Tuple = dataset['train'][0]['image'].convert('RGB' ) lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : List[str] = model(**a ) lowercase__ : List[Any] = outputs.logits lowercase__ : Union[str, Any] = torch.Size((1, 1_6) ) self.assertEqual(logits.shape , a ) lowercase__ : Tuple = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=a , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , a , atol=1e-4 ) )
style_context_codestyle: 645
label: 0
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[Any] = IFInpaintingSuperResolutionPipeline lowerCamelCase__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} lowerCamelCase__ : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"}) lowerCamelCase__ : Tuple = PipelineTesterMixin.required_optional_params - {"latents"} def _UpperCAmelCase ( self ) -> List[Any]: return self._get_superresolution_dummy_components() def _UpperCAmelCase ( self , a , a=0 ) -> Optional[Any]: if str(a ).startswith('mps' ): lowercase__ : Any = torch.manual_seed(a ) else: lowercase__ : Union[str, Any] = torch.Generator(device=a ).manual_seed(a ) lowercase__ : str = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(a ) ).to(a ) lowercase__ : int = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(a ) ).to(a ) lowercase__ : Optional[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(a ) ).to(a ) lowercase__ : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'original_image': original_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def _UpperCAmelCase ( self ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def _UpperCAmelCase ( self ) -> List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' ) def _UpperCAmelCase ( self ) -> Dict: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def _UpperCAmelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: self._test_save_load_local() def _UpperCAmelCase ( self ) -> int: self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
708
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class UpperCAmelCase_ : @staticmethod def _UpperCAmelCase ( *a , **a ) -> int: pass def a_ ( _lowerCAmelCase : Image ): '''simple docstring''' lowercase__ : List[str] = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class UpperCAmelCase_ ( unittest.TestCase): lowerCamelCase__ : Union[str, Any] = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def _UpperCAmelCase ( self , a , a , a ) -> Dict: lowercase__ : Union[str, Any] = DepthEstimationPipeline(model=a , image_processor=a ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _UpperCAmelCase ( self , a , a ) -> Optional[int]: lowercase__ : Tuple = depth_estimator('./tests/fixtures/tests_samples/COCO/000000039769.png' ) self.assertEqual({'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )} , a ) import datasets lowercase__ : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) lowercase__ : List[Any] = depth_estimator( [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] ) self.assertEqual( [ {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, {'predicted_depth': ANY(torch.Tensor ), 'depth': ANY(Image.Image )}, ] , a , ) @require_tf @unittest.skip('Depth estimation is not implemented in TF' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @slow @require_torch def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Tuple = 'Intel/dpt-large' lowercase__ : Optional[int] = pipeline('depth-estimation' , model=a ) lowercase__ : List[Any] = depth_estimator('http://images.cocodataset.org/val2017/000000039769.jpg' ) lowercase__ : Optional[Any] = hashimage(outputs['depth'] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['predicted_depth'].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['predicted_depth'].min().item() ) , 2.662 ) @require_torch def _UpperCAmelCase ( self ) -> Optional[int]: # This is highly irregular to have no small tests. self.skipTest('There is not hf-internal-testing tiny model for either GLPN nor DPT' )
645
0
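The depth-estimation pipeline exercised in the tests above can be driven directly. A short sketch using the same checkpoint and image URL as the slow test:

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

outputs["depth"]            # PIL.Image.Image rendering of the depth map
outputs["predicted_depth"]  # torch.Tensor of raw per-pixel depth predictions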
"""simple docstring""" def a_ ( _lowerCAmelCase : list ) -> list: '''simple docstring''' if len(_A ) <= 1: return [tuple(_A )] lowercase__ : Any = [] def generate(_lowerCAmelCase : int , _lowerCAmelCase : list ): lowercase__ : List[str] = [0] * n res.append(tuple(_A ) ) lowercase__ : List[Any] = 0 while i < n: if c[i] < i: if i % 2 == 0: lowercase__ , lowercase__ : List[str] = arr[i], arr[0] else: lowercase__ , lowercase__ : List[str] = arr[i], arr[c[i]] res.append(tuple(_A ) ) c[i] += 1 lowercase__ : Any = 0 else: lowercase__ : Union[str, Any] = 0 i += 1 generate(len(_A ) , _A ) return res if __name__ == "__main__": _UpperCamelCase : str = input("Enter numbers separated by a comma:\n").strip() _UpperCamelCase : Optional[Any] = [int(item) for item in user_input.split(",")] print(heaps(arr))
709
"""simple docstring""" import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class UpperCAmelCase_ ( _a): def __init__( self ) -> Any: lowercase__ : Tuple = [] def _UpperCAmelCase ( self , a , a , a , **a ) -> Any: self.events.append('on_init_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[int]: self.events.append('on_train_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]: self.events.append('on_train_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_epoch_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Optional[Any]: self.events.append('on_epoch_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_step_begin' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> str: self.events.append('on_step_end' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> int: self.events.append('on_evaluate' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Tuple: self.events.append('on_predict' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Union[str, Any]: self.events.append('on_save' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> List[str]: self.events.append('on_log' ) def _UpperCAmelCase ( self , a , a , a , **a ) -> Any: self.events.append('on_prediction_step' ) @require_torch class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> str: lowercase__ : str = tempfile.mkdtemp() def _UpperCAmelCase ( self ) -> Dict: shutil.rmtree(self.output_dir ) def _UpperCAmelCase ( self , a=0 , a=0 , a=6_4 , a=6_4 , a=None , a=False , **a ) -> int: # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure # its set to False since the tests later on depend on its value. 
lowercase__ : str = RegressionDataset(length=a ) lowercase__ : Any = RegressionDataset(length=a ) lowercase__ : Optional[Any] = RegressionModelConfig(a=a , b=a ) lowercase__ : Union[str, Any] = RegressionPreTrainedModel(a ) lowercase__ : Tuple = TrainingArguments(self.output_dir , disable_tqdm=a , report_to=[] , **a ) return Trainer( a , a , train_dataset=a , eval_dataset=a , callbacks=a , ) def _UpperCAmelCase ( self , a , a ) -> Union[str, Any]: self.assertEqual(len(a ) , len(a ) ) # Order doesn't matter lowercase__ : Optional[int] = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ ) lowercase__ : Tuple = sorted(a , key=lambda a : cb.__name__ if isinstance(a , a ) else cb.__class__.__name__ ) for cba, cba in zip(a , a ): if isinstance(a , a ) and isinstance(a , a ): self.assertEqual(a , a ) elif isinstance(a , a ) and not isinstance(a , a ): self.assertEqual(a , cba.__class__ ) elif not isinstance(a , a ) and isinstance(a , a ): self.assertEqual(cba.__class__ , a ) else: self.assertEqual(a , a ) def _UpperCAmelCase ( self , a ) -> Optional[Any]: lowercase__ : Dict = ['on_init_end', 'on_train_begin'] lowercase__ : List[Any] = 0 lowercase__ : Optional[int] = len(trainer.get_eval_dataloader() ) lowercase__ : Tuple = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate'] for _ in range(trainer.state.num_train_epochs ): expected_events.append('on_epoch_begin' ) for _ in range(a ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append('on_log' ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append('on_save' ) expected_events.append('on_epoch_end' ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : int = self.get_trainer() lowercase__ : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # Callbacks passed at init are added to the default callbacks lowercase__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback lowercase__ : List[Any] = self.get_trainer(disable_tqdm=a ) lowercase__ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback] lowercase__ : List[str] = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(a ) expected_callbacks.remove(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) lowercase__ : Optional[Any] = self.get_trainer() lowercase__ : List[Any] = trainer.pop_callback(a ) self.assertEqual(cb.__class__ , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) trainer.add_callback(a ) expected_callbacks.insert(0 , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) # We can also add, pop, or remove by instance lowercase__ : int = self.get_trainer() 
lowercase__ : List[str] = trainer.callback_handler.callbacks[0] trainer.remove_callback(a ) expected_callbacks.remove(a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) lowercase__ : Tuple = self.get_trainer() lowercase__ : Dict = trainer.callback_handler.callbacks[0] lowercase__ : Union[str, Any] = trainer.pop_callback(a ) self.assertEqual(a , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) trainer.add_callback(a ) expected_callbacks.insert(0 , a ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a ) def _UpperCAmelCase ( self ) -> Tuple: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action='ignore' , category=a ) lowercase__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() lowercase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # Independent log/save/eval lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() lowercase__ : int = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) trainer.train() lowercase__ : Union[str, Any] = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' ) trainer.train() lowercase__ : Optional[int] = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) lowercase__ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' ) trainer.train() lowercase__ : str = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # A bit of everything lowercase__ : Any = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=1_0 , eval_steps=5 , evaluation_strategy='steps' , ) trainer.train() lowercase__ : Any = trainer.callback_handler.callbacks[-2].events self.assertEqual(a , self.get_expected_events(a ) ) # warning should be emitted for duplicated callbacks with patch('transformers.trainer_callback.logger.warning' ) as warn_mock: lowercase__ : str = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(a ) in warn_mock.call_args[0][0]
645
0
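A quick usage check for the heaps function above; Heap's algorithm emits each permutation exactly once, so up to ordering it agrees with itertools.permutations:

from itertools import permutations

print(heaps([1, 2, 3]))
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]

assert sorted(heaps([1, 2, 3])) == sorted(permutations([1, 2, 3]))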
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() _UpperCamelCase : str =logging.get_logger(__name__) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' lowercase__ : str = WavaVecaForSequenceClassification.from_pretrained(A_ , config=A_ ) lowercase__ : int = downstream_dict['projector.weight'] lowercase__ : List[Any] = downstream_dict['projector.bias'] lowercase__ : Tuple = downstream_dict['model.post_net.linear.weight'] lowercase__ : Dict = downstream_dict['model.post_net.linear.bias'] return model def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : Dict = WavaVecaForAudioFrameClassification.from_pretrained(A_ , config=A_ ) lowercase__ : List[Any] = downstream_dict['model.linear.weight'] lowercase__ : List[Any] = downstream_dict['model.linear.bias'] return model def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : str = WavaVecaForXVector.from_pretrained(A_ , config=A_ ) lowercase__ : Tuple = downstream_dict['connector.weight'] lowercase__ : str = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): lowercase__ : Union[str, Any] = downstream_dict[ f"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] lowercase__ : int = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] lowercase__ : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] lowercase__ : Optional[Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] lowercase__ : Tuple = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] lowercase__ : Dict = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] lowercase__ : Optional[Any] = downstream_dict['objective.W'] return model @torch.no_grad() def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] ): '''simple docstring''' lowercase__ : Optional[Any] = torch.load(A_ , map_location='cpu' ) lowercase__ : List[Any] = checkpoint['Downstream'] lowercase__ : Dict = WavaVecaConfig.from_pretrained(A_ ) lowercase__ : List[Any] = WavaVecaFeatureExtractor.from_pretrained( A_ , return_attention_mask=A_ , do_normalize=A_ ) lowercase__ : str = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): lowercase__ : Optional[int] = convert_classification(A_ , A_ , A_ ) elif arch.endswith('ForAudioFrameClassification' ): lowercase__ : List[Any] = convert_diarization(A_ , A_ , A_ ) elif arch.endswith('ForXVector' ): lowercase__ : List[Any] = convert_xvector(A_ , A_ , A_ ) else: raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: lowercase__ : Tuple = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": _UpperCamelCase : Optional[Any] =argparse.ArgumentParser() parser.add_argument( "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model." 
) parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.") parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.") _UpperCamelCase : str =parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
710
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available _UpperCamelCase : str = { "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Tuple = [ "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", "GPTNeoForCausalLM", "GPTNeoForQuestionAnswering", "GPTNeoForSequenceClassification", "GPTNeoForTokenClassification", "GPTNeoModel", "GPTNeoPreTrainedModel", "load_tf_weights_in_gpt_neo", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Dict = [ "FlaxGPTNeoForCausalLM", "FlaxGPTNeoModel", "FlaxGPTNeoPreTrainedModel", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys _UpperCamelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
645
0
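The _LazyModule machinery in the GPT-Neo __init__.py above defers submodule imports until an exported name is first touched. A minimal sketch of the same idea using PEP 562's module-level __getattr__ (illustrative only, not the transformers implementation; the package and names are hypothetical):

# mypackage/__init__.py  (hypothetical package layout)
import importlib

_import_structure = {
    "configuration": ["MyConfig"],
    "modeling": ["MyModel"],
}

# reverse map: exported name -> submodule that defines it
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}


def __getattr__(name):  # PEP 562: only called when ``name`` is not found normally
    if name in _name_to_module:
        submodule = importlib.import_module(f".{_name_to_module[name]}", __name__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")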
"""simple docstring""" import comet # From: unbabel-comet import torch import datasets _UpperCamelCase : str = datasets.logging.get_logger(__name__) _UpperCamelCase : Any = '''\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = "{COMET}: A Neural Framework for {MT} Evaluation", author = "Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon", booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/2020.emnlp-main.213", pages = "2685--2702", } ''' _UpperCamelCase : int = '''\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. ''' _UpperCamelCase : Any = ''' COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric(\'comet\') >>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use >>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."] >>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"] >>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results["scores"]]) [0.19, 0.92] ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCAmelCase_ ( datasets.Metric): def _UpperCAmelCase ( self ) -> Any: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'sources': datasets.Value('string' , id='sequence' ), 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[ 'https://github.com/Unbabel/COMET', 'https://www.aclweb.org/anthology/2020.emnlp-main.213/', 'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6', ] , ) def _UpperCAmelCase ( self , a ) -> Optional[Any]: if self.config_name == "default": lowercase__ : Optional[Any] = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) ) else: lowercase__ : List[str] = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def _UpperCAmelCase ( self , a , a , a , a=None , a=False ) -> Dict: if gpus is None: lowercase__ : Optional[int] = 1 if torch.cuda.is_available() else 0 lowercase__ : Any = {'src': sources, 'mt': predictions, 'ref': references} lowercase__ : List[str] = [dict(zip(_A , _A ) ) for t in zip(*data.values() )] lowercase__ : int = self.scorer.predict(_A , gpus=_A , progress_bar=_A ) return {"mean_score": mean_score, "scores": scores}
711
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self , a ) -> str: for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ): lowercase__ : str = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = 'sshleifer/tiny-gpt2' lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , ) lowercase__ : str = TensorFlowBenchmark(a ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> int: lowercase__ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase__ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , only_pretrain_model=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase__ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : List[str] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Any = 'sshleifer/tiny-gpt2' lowercase__ : List[Any] = AutoConfig.from_pretrained(a ) lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=a , multi_process=a , ) lowercase__ : Tuple = TensorFlowBenchmark(a , [config] ) lowercase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> int: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : List[str] = AutoConfig.from_pretrained(a ) lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : List[str] = TensorFlowBenchmark(a , [config] ) lowercase__ : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : Optional[Any] = TensorFlowBenchmark(a ) lowercase__ : Tuple = benchmark.run() 
self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Optional[Any] = 'sshleifer/tiny-gpt2' lowercase__ : Optional[int] = AutoConfig.from_pretrained(a ) lowercase__ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : str = TensorFlowBenchmark(a , [config] ) lowercase__ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = 'patrickvonplaten/t5-tiny-random' lowercase__ : Any = AutoConfig.from_pretrained(a ) lowercase__ : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=a , ) lowercase__ : int = TensorFlowBenchmark(a , configs=[config] ) lowercase__ : Union[str, Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : Any = 'sshleifer/tiny-gpt2' lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=a , inference=a , sequence_lengths=[8] , batch_sizes=[1] , use_xla=a , multi_process=a , ) lowercase__ : Any = TensorFlowBenchmark(a ) lowercase__ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Any = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a , save_to_csv=a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(a , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(a , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(a , 'env.csv' ) , multi_process=a , ) lowercase__ : Union[str, Any] = TensorFlowBenchmark(a ) benchmark.run() self.assertTrue(Path(os.path.join(a , 'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a , 'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(a , 'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Tuple = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(a ): self.assertTrue(hasattr(a , 'sequential' ) ) self.assertTrue(hasattr(a , 'cumulative' ) ) self.assertTrue(hasattr(a , 'current' ) ) self.assertTrue(hasattr(a , 'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase__ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(a , 'log.txt' ) , log_print=a , trace_memory_line_by_line=a , eager_mode=a , multi_process=a , ) lowercase__ : Optional[int] = TensorFlowBenchmark(a ) lowercase__ : Optional[Any] = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(a , 'log.txt' ) ).exists() )
645
0
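The _compute method above relies on a small transposition idiom: a dict of parallel columns is turned into the list of per-example records that COMET's scorer expects. In isolation:

sources = ["Dem Feuer konnte Einhalt geboten werden"]
predictions = ["The fire could be stopped"]
references = ["They were able to control the fire."]

data = {"src": sources, "mt": predictions, "ref": references}
# zip(*data.values()) walks the columns row by row; zip(data, t) pairs keys with one row
records = [dict(zip(data, t)) for t in zip(*data.values())]
# [{'src': 'Dem Feuer konnte Einhalt geboten werden',
#   'mt': 'The fire could be stopped',
#   'ref': 'They were able to control the fire.'}]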
"""simple docstring""" import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification _UpperCamelCase : List[Any] = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co _UpperCamelCase : str = "main" # Default branch name _UpperCamelCase : Any = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2" # One particular commit (not the top of `main`) _UpperCamelCase : Optional[int] = "aaaaaaa" # This commit does not exist, so we should 404. _UpperCamelCase : Optional[Any] = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684" # Sha-1 of config.json on the top of `main`, for checking purposes _UpperCamelCase : str = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3" @contextlib.contextmanager def a_ ( ): '''simple docstring''' print('Welcome!' ) yield print('Bye!' ) @contextlib.contextmanager def a_ ( ): '''simple docstring''' print('Bonjour!' ) yield print('Au revoir!' ) class UpperCAmelCase_ ( unittest.TestCase): def _UpperCAmelCase ( self ) -> Optional[int]: assert transformers.__spec__ is not None assert importlib.util.find_spec('transformers' ) is not None class UpperCAmelCase_ ( unittest.TestCase): @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO ) def _UpperCAmelCase ( self , a ) -> Dict: with ContextManagers([] ): print('Transformers are awesome!' ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' ) @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO ) def _UpperCAmelCase ( self , a ) -> Optional[Any]: with ContextManagers([context_en()] ): print('Transformers are awesome!' ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' ) @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO ) def _UpperCAmelCase ( self , a ) -> List[Any]: with ContextManagers([context_fr(), context_en()] ): print('Transformers are awesome!' 
)

        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels detects the framework through inheritance, so subclasses work too
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
712
"""simple docstring""" import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class UpperCAmelCase_ ( _a): def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=False , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=3 , a=4 , a=None , ) -> Any: lowercase__ : Tuple = parent lowercase__ : List[Any] = batch_size lowercase__ : List[Any] = seq_length lowercase__ : List[Any] = is_training lowercase__ : Optional[Any] = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : int = use_labels lowercase__ : Tuple = vocab_size lowercase__ : int = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : Optional[Any] = intermediate_size lowercase__ : Optional[Any] = hidden_act lowercase__ : List[str] = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : List[Any] = max_position_embeddings lowercase__ : List[str] = type_vocab_size lowercase__ : Tuple = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = num_labels lowercase__ : Tuple = num_choices lowercase__ : str = scope def _UpperCAmelCase ( self ) -> Any: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_input_mask: lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Dict = None lowercase__ : Optional[Any] = None lowercase__ : int = None if self.use_labels: lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : List[Any] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> Optional[int]: return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict: lowercase__ : Tuple = DistilBertModel(config=a ) model.to(a ) model.eval() lowercase__ : Any = model(a , a ) lowercase__ : str = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Dict: lowercase__ : Optional[int] = DistilBertForMaskedLM(config=a ) model.to(a ) model.eval() lowercase__ : Union[str, Any] = model(a , attention_mask=a , labels=a ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> int: lowercase__ : Tuple = DistilBertForQuestionAnswering(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model( a , attention_mask=a , start_positions=a , end_positions=a ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> List[str]: lowercase__ : int = self.num_labels lowercase__ : Dict = DistilBertForSequenceClassification(a ) model.to(a ) model.eval() lowercase__ : Optional[Any] = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Any: lowercase__ : Any = self.num_labels lowercase__ : List[str] = DistilBertForTokenClassification(config=a ) model.to(a ) model.eval() lowercase__ : Any = model(a , attention_mask=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a , a , a ) -> Tuple: lowercase__ : List[Any] = self.num_choices lowercase__ : Any = DistilBertForMultipleChoice(config=a ) model.to(a ) model.eval() lowercase__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowercase__ : int = model( a , attention_mask=a , labels=a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Union[str, Any] = self.prepare_config_and_inputs() ((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : List[str] = config_and_inputs lowercase__ : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : List[str] = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) lowerCamelCase__ : str = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : Any = True lowerCamelCase__ : List[Any] = True lowerCamelCase__ : Optional[Any] = True def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : str = DistilBertModelTester(self ) lowercase__ : int = ConfigTester(self , config_class=a , dim=3_7 ) def _UpperCAmelCase ( self ) -> Dict: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*a ) def _UpperCAmelCase ( self ) -> Any: lowercase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : 
str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*a ) def _UpperCAmelCase ( self ) -> int: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*a ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = DistilBertModel.from_pretrained(a ) self.assertIsNotNone(a ) @slow @require_torch_gpu def _UpperCAmelCase ( self ) -> Any: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return lowercase__ : Optional[int] = True lowercase__ : Union[str, Any] = model_class(config=a ) lowercase__ : int = self._prepare_for_class(a , a ) lowercase__ : Tuple = torch.jit.trace( a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a , os.path.join(a , 'traced_model.pt' ) ) lowercase__ : Optional[int] = torch.jit.load(os.path.join(a , 'traced_model.pt' ) , map_location=a ) loaded(inputs_dict['input_ids'].to(a ) , inputs_dict['attention_mask'].to(a ) ) @require_torch class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : int = DistilBertModel.from_pretrained('distilbert-base-uncased' ) lowercase__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase__ : Optional[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): lowercase__ : Optional[Any] = model(a , attention_mask=a )[0] lowercase__ : Tuple = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , a ) lowercase__ : List[Any] = torch.tensor( [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
645
0
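ContextManagers, tested above, enters a list of context managers together and unwinds them in reverse. A rough equivalent can be built on contextlib.ExitStack; this sketch is illustrative, not the transformers implementation:

from contextlib import ExitStack


class ContextManagers:
    """Enter a list of context managers together; exit them in reverse (LIFO) order."""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for cm in self.context_managers:
            self.stack.enter_context(cm)  # registered for LIFO cleanup

    def __exit__(self, *exc_info):
        self.stack.__exit__(*exc_info)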
"""simple docstring""" import math class UpperCAmelCase_ : def __init__( self , a=0 ) -> List[Any]: # a graph with Node 0,1,...,N-1 lowercase__ : Any = n lowercase__ : Optional[int] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # adjacency matrix for weight lowercase__ : Union[str, Any] = [ [math.inf for j in range(0 , lowerCamelCase_ )] for i in range(0 , lowerCamelCase_ ) ] # dp[i][j] stores minimum distance from i to j def _UpperCAmelCase ( self , a , a , a ) -> str: lowercase__ : Union[str, Any] = w def _UpperCAmelCase ( self ) -> Any: for k in range(0 , self.n ): for i in range(0 , self.n ): for j in range(0 , self.n ): lowercase__ : Union[str, Any] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] ) def _UpperCAmelCase ( self , a , a ) -> Optional[int]: return self.dp[u][v] if __name__ == "__main__": _UpperCamelCase : Union[str, Any] = Graph(5) graph.add_edge(0, 2, 9) graph.add_edge(0, 4, 10) graph.add_edge(1, 3, 5) graph.add_edge(2, 3, 7) graph.add_edge(3, 0, 10) graph.add_edge(3, 1, 2) graph.add_edge(3, 2, 1) graph.add_edge(3, 4, 6) graph.add_edge(4, 1, 3) graph.add_edge(4, 2, 4) graph.add_edge(4, 3, 9) graph.floyd_warshall() graph.show_min(1, 4) graph.show_min(0, 3)
713
"""simple docstring""" from __future__ import annotations def a_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float , ): '''simple docstring''' if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif stress < 0: raise ValueError('Stress cannot be negative' ) elif tangential_force < 0: raise ValueError('Tangential Force cannot be negative' ) elif area < 0: raise ValueError('Area cannot be negative' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
645
0
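The shear-stress helper above solves tau = F / A for whichever of the three quantities is passed as zero. Two worked examples (values chosen for illustration):

shear_stress(stress=25, tangential_force=100, area=0)
# ('area', 4.0)      since A = F / tau = 100 / 25

shear_stress(stress=0, tangential_force=1600, area=80)
# ('stress', 20.0)   since tau = F / A = 1600 / 80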
"""simple docstring""" def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : int ): '''simple docstring''' def count_of_possible_combinations(_lowerCAmelCase : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(A__ ) def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : int ): '''simple docstring''' def count_of_possible_combinations_with_dp_array( _lowerCAmelCase : int , _lowerCAmelCase : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase__ : Dict = sum( count_of_possible_combinations_with_dp_array(target - item , A__ ) for item in array ) lowercase__ : Tuple = answer return answer lowercase__ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(A__ , A__ ) def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : list[int] , _lowerCAmelCase : int ): '''simple docstring''' lowercase__ : Any = [0] * (target + 1) lowercase__ : Union[str, Any] = 1 for i in range(1 , target + 1 ): for j in range(A__ ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() _UpperCamelCase : Tuple = 3 _UpperCamelCase : int = 5 _UpperCamelCase : Optional[Any] = [1, 2, 5] print(combination_sum_iv(n, array, target))
714
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=[3_0, 3_0] , a=2 , a=3 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=3 , a=None , a=8 , a=1_0 , ) -> Any: lowercase__ : List[str] = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Optional[int] = image_size lowercase__ : List[Any] = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : str = is_training lowercase__ : Optional[Any] = use_labels lowercase__ : Optional[Any] = hidden_size lowercase__ : Dict = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[Any] = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Any = type_sequence_label_size lowercase__ : Dict = initializer_range lowercase__ : Union[str, Any] = num_labels lowercase__ : Tuple = scope lowercase__ : Tuple = n_targets lowercase__ : Optional[int] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowercase__ : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens def _UpperCAmelCase ( self ) -> Any: lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowercase__ : Tuple = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowercase__ : int = [] for i in range(self.batch_size ): lowercase__ : Optional[Any] = {} lowercase__ : Any = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=a ) lowercase__ : List[str] = torch.rand(self.n_targets , 4 , device=a ) labels.append(a ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> List[Any]: return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def _UpperCAmelCase ( self , a , a , a ) -> int: lowercase__ : List[str] = YolosModel(config=a ) model.to(a ) model.eval() lowercase__ : List[Any] = model(a ) self.parent.assertEqual( result.last_hidden_state.shape , 
(self.batch_size, self.expected_seq_len, self.hidden_size) ) def _UpperCAmelCase ( self , a , a , a ) -> Union[str, Any]: lowercase__ : str = YolosForObjectDetection(a ) model.to(a ) model.eval() lowercase__ : Dict = model(pixel_values=a ) lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowercase__ : str = model(pixel_values=a , labels=a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Any = config_and_inputs lowercase__ : Any = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = (YolosModel, YolosForObjectDetection) if is_torch_available() else () lowerCamelCase__ : List[str] = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) lowerCamelCase__ : List[Any] = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Tuple = False lowerCamelCase__ : Union[str, Any] = False def _UpperCAmelCase ( self , a , a , a=False ) -> Dict: lowercase__ : List[str] = super()._prepare_for_class(a , a , return_labels=a ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowercase__ : Optional[Any] = [] for i in range(self.model_tester.batch_size ): lowercase__ : Dict = {} lowercase__ : Dict = torch.ones( size=(self.model_tester.n_targets,) , device=a , dtype=torch.long ) lowercase__ : Optional[Any] = torch.ones( self.model_tester.n_targets , 4 , device=a , dtype=torch.float ) labels.append(a ) lowercase__ : Union[str, Any] = labels return inputs_dict def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : Dict = YolosModelTester(self ) lowercase__ : Optional[int] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self ) -> str: self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: # YOLOS does not use inputs_embeds pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) lowercase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Tuple = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( 
self ) -> Dict: lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Dict = True # in YOLOS, the seq_len is different lowercase__ : Tuple = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowercase__ : Optional[int] = True lowercase__ : str = False lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : str = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : Optional[int] = True lowercase__ : List[Any] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Union[str, Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[str] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : Dict = len(a ) # Check attention is always last and order is fine lowercase__ : Any = True lowercase__ : int = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Any = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = 1 self.assertEqual(out_len + added_hidden_states , len(a ) ) lowercase__ : Tuple = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> List[str]: def check_hidden_states_output(a , a , a ): lowercase__ : str = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(a , a ) ) lowercase__ : int = outputs.hidden_states lowercase__ : Any = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(a ) , a ) # YOLOS has a different seq_length lowercase__ : Optional[int] = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[Any] = True check_hidden_states_output(a , a , a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*a ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : int = YolosModel.from_pretrained(a ) self.assertIsNotNone(a ) def a_ ( ): '''simple docstring''' lowercase__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Union[str, Any]: return AutoImageProcessor.from_pretrained('hustvl/yolos-small' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = 
YolosForObjectDetection.from_pretrained('hustvl/yolos-small' ).to(a ) lowercase__ : Tuple = self.default_image_processor lowercase__ : Optional[int] = prepare_img() lowercase__ : int = image_processor(images=a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : int = model(inputs.pixel_values ) # verify outputs lowercase__ : Tuple = torch.Size((1, 1_0_0, 9_2) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : Any = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=a , ) lowercase__ : List[str] = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , a , atol=1e-4 ) ) # verify postprocessing lowercase__ : Optional[Any] = image_processor.post_process_object_detection( a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(a ) lowercase__ : Any = [7_5, 7_5, 1_7, 6_3, 1_7] lowercase__ : Optional[int] = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(a ) self.assertEqual(len(results['scores'] ) , 5 ) self.assertTrue(torch.allclose(results['scores'] , a , atol=1e-4 ) ) self.assertSequenceEqual(results['labels'].tolist() , a ) self.assertTrue(torch.allclose(results['boxes'][0, :] , a ) )
645
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_distilbert import DistilBertTokenizer _UpperCamelCase : Union[str, Any] = logging.get_logger(__name__) _UpperCamelCase : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _UpperCamelCase : List[str] = { "vocab_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt" ), "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt", "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json", "distilbert-base-uncased-distilled-squad": ( "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json", "distilbert-base-cased-distilled-squad": ( "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json" ), "distilbert-base-german-cased": ( "https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json" ), "distilbert-base-multilingual-cased": ( "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json" ), }, } _UpperCamelCase : Tuple = { "distilbert-base-uncased": 5_12, "distilbert-base-uncased-distilled-squad": 5_12, "distilbert-base-cased": 5_12, "distilbert-base-cased-distilled-squad": 5_12, "distilbert-base-german-cased": 5_12, "distilbert-base-multilingual-cased": 5_12, } _UpperCamelCase : List[Any] = { "distilbert-base-uncased": {"do_lower_case": True}, "distilbert-base-uncased-distilled-squad": {"do_lower_case": True}, "distilbert-base-cased": {"do_lower_case": False}, "distilbert-base-cased-distilled-squad": {"do_lower_case": False}, "distilbert-base-german-cased": {"do_lower_case": False}, "distilbert-base-multilingual-cased": {"do_lower_case": False}, } class UpperCAmelCase_ ( _UpperCamelCase): lowerCamelCase__ : Tuple = VOCAB_FILES_NAMES lowerCamelCase__ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ : Dict = PRETRAINED_INIT_CONFIGURATION lowerCamelCase__ : Optional[Any] = ["input_ids", "attention_mask"] lowerCamelCase__ : Union[str, Any] = DistilBertTokenizer def __init__( self , a=None , a=None , a=True , a="[UNK]" , a="[SEP]" , a="[PAD]" , a="[CLS]" , a="[MASK]" , a=True , a=None , **a , ) -> Optional[Any]: super().__init__( a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , ) lowercase__ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , a ) != do_lower_case or normalizer_state.get('strip_accents' , a ) != strip_accents or 
normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars ): lowercase__ : List[Any] = getattr(a , normalizer_state.pop('type' ) ) lowercase__ : Optional[int] = do_lower_case lowercase__ : Any = strip_accents lowercase__ : int = tokenize_chinese_chars lowercase__ : Dict = normalizer_class(**a ) lowercase__ : List[Any] = do_lower_case def _UpperCAmelCase ( self , a , a=None ) -> Union[str, Any]: lowercase__ : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCAmelCase ( self , a , a = None ) -> Optional[Any]: lowercase__ : Optional[Any] = [self.sep_token_id] lowercase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self , a , a = None ) -> List[Any]: lowercase__ : List[str] = self._tokenizer.model.save(a , name=a ) return tuple(a )
715
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch _UpperCamelCase : int = logging.get_logger(__name__) @dataclass class UpperCAmelCase_ : def __init__( self , a=False , a=False , a=6.0 , a=None , a=False , a=False , a=None , a="fp4" , a=False , **a , ) -> Tuple: lowercase__ : str = load_in_abit lowercase__ : str = load_in_abit lowercase__ : List[str] = llm_inta_threshold lowercase__ : Dict = llm_inta_skip_modules lowercase__ : Tuple = llm_inta_enable_fpaa_cpu_offload lowercase__ : Any = llm_inta_has_fpaa_weight lowercase__ : Any = bnb_abit_quant_type lowercase__ : Dict = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: lowercase__ : Dict = torch.floataa elif isinstance(a , a ): lowercase__ : Any = getattr(a , a ) elif isinstance(a , torch.dtype ): lowercase__ : Any = bnb_abit_compute_dtype else: raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' ) self.post_init() def _UpperCAmelCase ( self ) -> str: if not isinstance(self.llm_inta_threshold , a ): raise ValueError('llm_int8_threshold must be a float' ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , a ): raise ValueError('llm_int8_skip_modules must be a list of strings' ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , a ): raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' ) if not isinstance(self.llm_inta_has_fpaa_weight , a ): raise ValueError('llm_int8_has_fp16_weight must be a boolean' ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' ) if not isinstance(self.bnb_abit_quant_type , a ): raise ValueError('bnb_4bit_quant_type must be a string' ) if not isinstance(self.bnb_abit_use_double_quant , a ): raise ValueError('bnb_4bit_use_double_quant must be a boolean' ) if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse( '0.39.0' ): raise ValueError( '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' ) def _UpperCAmelCase ( self ) -> Tuple: return self.load_in_abit or self.load_in_abit def _UpperCAmelCase ( self ) -> List[str]: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def _UpperCAmelCase ( cls , a , a , **a ) -> Optional[Any]: lowercase__ : List[Any] = cls(**a ) lowercase__ : Union[str, Any] = [] for key, value in kwargs.items(): if hasattr(a , a ): setattr(a , a , a ) to_remove.append(a ) for key in to_remove: kwargs.pop(a , a ) if 
return_unused_kwargs: return config, kwargs else: return config def _UpperCAmelCase ( self , a ) -> Dict: with open(a , 'w' , encoding='utf-8' ) as writer: lowercase__ : Any = self.to_dict() lowercase__ : str = json.dumps(a , indent=2 , sort_keys=a ) + '\n' writer.write(a ) def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase__ : Any = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1] return output def __repr__( self ) -> Dict: return f"""{self.__class__.__name__} {self.to_json_string()}""" def _UpperCAmelCase ( self , a = True ) -> str: if use_diff is True: lowercase__ : List[Any] = self.to_diff_dict() else: lowercase__ : List[str] = self.to_dict() return json.dumps(a , indent=2 , sort_keys=a ) + "\n" def _UpperCAmelCase ( self ) -> Dict[str, Any]: lowercase__ : Tuple = self.to_dict() # get the default config dict lowercase__ : Optional[Any] = BitsAndBytesConfig().to_dict() lowercase__ : int = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: lowercase__ : Optional[int] = value return serializable_config_dict
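# Added standalone sketch (not part of the original file): the diff-serialization
# idea used by the to_diff_dict method above, with a hypothetical TinyConfig so
# it runs without torch or bitsandbytes.
import json
from dataclasses import asdict, dataclass


@dataclass
class TinyConfig:
    llm_int8_threshold: float = 6.0
    bnb_4bit_quant_type: str = "fp4"


def to_diff_dict(cfg: TinyConfig) -> dict:
    default = asdict(TinyConfig())
    # only serialize values that differ from the default config
    return {k: v for k, v in asdict(cfg).items() if v != default[k]}


if __name__ == "__main__":
    print(json.dumps(to_diff_dict(TinyConfig(bnb_4bit_quant_type="nf4"))))
    # {"bnb_4bit_quant_type": "nf4"}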
645
0
"""simple docstring""" def a_ ( _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' lowercase__ : int = len(a__ ) lowercase__ : Dict = sum(a__ ) lowercase__ : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): lowercase__ : Any = True for i in range(1 , s + 1 ): lowercase__ : Union[str, Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): lowercase__ : Tuple = dp[i][j - 1] if arr[i - 1] <= j: lowercase__ : Optional[Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: lowercase__ : Dict = s - 2 * j break return diff
716
"""simple docstring""" import argparse import gc import json import os import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _UpperCamelCase : int = 16 _UpperCamelCase : Union[str, Any] = 32 def a_ ( _lowerCAmelCase : Tuple ): '''simple docstring''' return int(x / 2**20 ) class UpperCAmelCase_ : def __enter__( self ) -> Union[str, Any]: gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero lowercase__ : List[str] = torch.cuda.memory_allocated() return self def __exit__( self , *a ) -> Any: gc.collect() torch.cuda.empty_cache() lowercase__ : Optional[Any] = torch.cuda.memory_allocated() lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated() lowercase__ : List[Any] = bamb(self.end - self.begin ) lowercase__ : List[Any] = bamb(self.peak - self.begin ) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def a_ ( _lowerCAmelCase : Accelerator , _lowerCAmelCase : int = 16 , _lowerCAmelCase : str = "bert-base-cased" , _lowerCAmelCase : int = 320 , _lowerCAmelCase : int = 160 , ): '''simple docstring''' lowercase__ : List[Any] = AutoTokenizer.from_pretrained(_lowerCAmelCase ) lowercase__ : Union[str, Any] = load_dataset( 'glue' , 'mrpc' , split={'train': f"""train[:{n_train}]""", 'validation': f"""validation[:{n_val}]"""} ) def tokenize_function(_lowerCAmelCase : int ): # max_length=None => use the model max length (it's actually the default) lowercase__ : List[str] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_lowerCAmelCase , max_length=_lowerCAmelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowercase__ : Union[str, Any] = datasets.map( _lowerCAmelCase , batched=_lowerCAmelCase , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_lowerCAmelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : Union[str, Any] = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(_lowerCAmelCase : Any ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(_lowerCAmelCase , padding='max_length' , max_length=128 , return_tensors='pt' ) return tokenizer.pad(_lowerCAmelCase , padding='longest' , return_tensors='pt' ) # Instantiate dataloaders. 
lowercase__ : Dict = DataLoader( tokenized_datasets['train'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) lowercase__ : Dict = DataLoader( tokenized_datasets['validation'] , shuffle=_lowerCAmelCase , collate_fn=_lowerCAmelCase , batch_size=_lowerCAmelCase ) return train_dataloader, eval_dataloader def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Optional[int] = config['lr'] lowercase__ : Optional[Any] = int(config['num_epochs'] ) lowercase__ : Optional[Any] = int(config['seed'] ) lowercase__ : int = int(config['batch_size'] ) lowercase__ : Union[str, Any] = args.model_name_or_path set_seed(_lowerCAmelCase ) lowercase__ , lowercase__ : Tuple = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.n_train , args.n_val ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(_lowerCAmelCase , return_dict=_lowerCAmelCase ) # Instantiate optimizer lowercase__ : List[Any] = ( AdamW if accelerator.state.deepspeed_plugin is None or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowercase__ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=_lowerCAmelCase ) if accelerator.state.deepspeed_plugin is not None: lowercase__ : Optional[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ 'gradient_accumulation_steps' ] else: lowercase__ : List[Any] = 1 lowercase__ : List[Any] = (len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=_lowerCAmelCase , num_warmup_steps=0 , num_training_steps=_lowerCAmelCase , ) else: lowercase__ : Tuple = DummyScheduler(_lowerCAmelCase , total_num_steps=_lowerCAmelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = accelerator.prepare( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # We need to keep track of how many total steps we have iterated over lowercase__ : Optional[int] = 0 # We also need to keep track of the stating epoch so files are named properly lowercase__ : Tuple = 0 # Now we train the model lowercase__ : Optional[Any] = {} for epoch in range(_lowerCAmelCase , _lowerCAmelCase ): with TorchTracemalloc() as tracemalloc: model.train() for step, batch in enumerate(_lowerCAmelCase ): lowercase__ : List[Any] = model(**_lowerCAmelCase ) lowercase__ : Dict = outputs.loss lowercase__ : int = loss / gradient_accumulation_steps accelerator.backward(_lowerCAmelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) ) accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) ) accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) ) accelerator.print( 'Total Peak Memory consumed during the train (max): {}'.format( tracemalloc.peaked + bamb(tracemalloc.begin ) ) ) lowercase__ : int = tracemalloc.peaked + bamb(tracemalloc.begin ) if args.peak_memory_upper_bound is not None: assert ( train_total_peak_memory[f"""epoch-{epoch}"""] <= args.peak_memory_upper_bound ), "Peak memory usage exceeded the upper bound" accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def a_ ( ): '''simple docstring''' lowercase__ : int = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' ) parser.add_argument( '--model_name_or_path' , type=_lowerCAmelCase , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_lowerCAmelCase , ) parser.add_argument( '--output_dir' , type=_lowerCAmelCase , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , ) parser.add_argument( '--peak_memory_upper_bound' , type=_lowerCAmelCase , default=_lowerCAmelCase , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , ) parser.add_argument( '--n_train' , type=_lowerCAmelCase , default=320 , help='Number of training examples to use.' , ) parser.add_argument( '--n_val' , type=_lowerCAmelCase , default=160 , help='Number of validation examples to use.' , ) parser.add_argument( '--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of train epochs.' , ) lowercase__ : Any = parser.parse_args() lowercase__ : Optional[Any] = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16} training_function(_lowerCAmelCase , _lowerCAmelCase ) if __name__ == "__main__": main()
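# Added standalone sketch (not part of the original file): the TorchTracemalloc
# context-manager pattern above, reduced to CPU memory via the stdlib
# tracemalloc module so it runs without a GPU.
import tracemalloc


class TraceMalloc:
    def __enter__(self):
        tracemalloc.start()
        return self

    def __exit__(self, *exc):
        current, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()
        self.used = current // 2**20  # MiB still allocated when the block ends
        self.peaked = peak // 2**20  # MiB peak allocation inside the block


if __name__ == "__main__":
    with TraceMalloc() as tm:
        data = [bytes(2**20) for _ in range(32)]  # allocate roughly 32 MiB
    print(f"used {tm.used} MiB, peaked {tm.peaked} MiB")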
645
0
"""simple docstring""" from argparse import ArgumentParser from datasets.commands.convert import ConvertCommand from datasets.commands.dummy_data import DummyDataCommand from datasets.commands.env import EnvironmentCommand from datasets.commands.run_beam import RunBeamCommand from datasets.commands.test import TestCommand from datasets.utils.logging import set_verbosity_info def a_ ( _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )} def a_ ( ): '''simple docstring''' lowercase__ : str = ArgumentParser( 'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=snake_case__ ) lowercase__ : List[Any] = parser.add_subparsers(help='datasets-cli command helpers' ) set_verbosity_info() # Register commands ConvertCommand.register_subcommand(snake_case__ ) EnvironmentCommand.register_subcommand(snake_case__ ) TestCommand.register_subcommand(snake_case__ ) RunBeamCommand.register_subcommand(snake_case__ ) DummyDataCommand.register_subcommand(snake_case__ ) # Parse args lowercase__ , lowercase__ : int = parser.parse_known_args() if not hasattr(snake_case__ , 'func' ): parser.print_help() exit(1 ) lowercase__ : Any = parse_unknown_args(snake_case__ ) # Run lowercase__ : str = args.func(snake_case__ , **snake_case__ ) service.run() if __name__ == "__main__": main()
717
"""simple docstring""" def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Any = [0] * len(_lowerCAmelCase ) for i in range(1 , len(_lowerCAmelCase ) ): # use last results for better performance - dynamic programming lowercase__ : List[str] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: lowercase__ : Dict = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 lowercase__ : Union[str, Any] = j return prefix_result def a_ ( _lowerCAmelCase : str ): '''simple docstring''' return max(prefix_function(_lowerCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod()
645
0
"""simple docstring""" import datetime import platform import subprocess from typing import Optional, Tuple, Union import numpy as np def a_ ( _lowerCAmelCase : int , _lowerCAmelCase : Union[str, Any] ): '''simple docstring''' lowercase__ : Dict = f"""{sampling_rate}""" lowercase__ : Optional[Any] = '''1''' lowercase__ : Union[str, Any] = '''f32le''' lowercase__ : Any = [ '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] try: with subprocess.Popen(_lowerCAmelCase , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process: lowercase__ : Optional[int] = ffmpeg_process.communicate(_lowerCAmelCase ) except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error lowercase__ : str = output_stream[0] lowercase__ : str = np.frombuffer(_lowerCAmelCase , np.floataa ) if audio.shape[0] == 0: raise ValueError('Malformed soundfile' ) return audio def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : str = "f32le" , ): '''simple docstring''' lowercase__ : List[Any] = f"""{sampling_rate}""" lowercase__ : List[str] = '''1''' if format_for_conversion == "s16le": lowercase__ : Dict = 2 elif format_for_conversion == "f32le": lowercase__ : int = 4 else: raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" ) lowercase__ : Optional[Any] = platform.system() if system == "Linux": lowercase__ : Union[str, Any] = '''alsa''' lowercase__ : Tuple = '''default''' elif system == "Darwin": lowercase__ : List[Any] = '''avfoundation''' lowercase__ : Optional[int] = ''':0''' elif system == "Windows": lowercase__ : str = '''dshow''' lowercase__ : str = '''default''' lowercase__ : Any = [ '''ffmpeg''', '''-f''', format_, '''-i''', input_, '''-ac''', ac, '''-ar''', ar, '''-f''', format_for_conversion, '''-fflags''', '''nobuffer''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''', ] lowercase__ : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample lowercase__ : Any = _ffmpeg_stream(_lowerCAmelCase , _lowerCAmelCase ) for item in iterator: yield item def a_ ( _lowerCAmelCase : Dict , _lowerCAmelCase : Dict , _lowerCAmelCase : Any = None , _lowerCAmelCase : Optional[int] = None , _lowerCAmelCase : Optional[int] = "f32le" , ): '''simple docstring''' if stream_chunk_s is not None: lowercase__ : Dict = stream_chunk_s else: lowercase__ : Dict = chunk_length_s lowercase__ : int = ffmpeg_microphone(_lowerCAmelCase , _lowerCAmelCase , format_for_conversion=_lowerCAmelCase ) if format_for_conversion == "s16le": lowercase__ : Dict = np.intaa lowercase__ : Optional[Any] = 2 elif format_for_conversion == "f32le": lowercase__ : Optional[Any] = np.floataa lowercase__ : Dict = 4 else: raise ValueError(f"""Unhandled format `{format_for_conversion}`. 
Please use `s16le` or `f32le`""" ) if stride_length_s is None: lowercase__ : List[Any] = chunk_length_s / 6 lowercase__ : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample if isinstance(_lowerCAmelCase , (int, float) ): lowercase__ : Optional[int] = [stride_length_s, stride_length_s] lowercase__ : Tuple = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample lowercase__ : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample lowercase__ : Union[str, Any] = datetime.datetime.now() lowercase__ : int = datetime.timedelta(seconds=_lowerCAmelCase ) for item in chunk_bytes_iter(_lowerCAmelCase , _lowerCAmelCase , stride=(stride_left, stride_right) , stream=_lowerCAmelCase ): # Put everything back in numpy scale lowercase__ : Dict = np.frombuffer(item['raw'] , dtype=_lowerCAmelCase ) lowercase__ : List[str] = ( item['''stride'''][0] // size_of_sample, item['''stride'''][1] // size_of_sample, ) lowercase__ : Optional[int] = sampling_rate audio_time += delta if datetime.datetime.now() > audio_time + 10 * delta: # We're late !! SKIP continue yield item def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int = False ): '''simple docstring''' lowercase__ : str = b'''''' lowercase__ : List[Any] = stride if stride_left + stride_right >= chunk_len: raise ValueError( f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" ) lowercase__ : List[str] = 0 for raw in iterator: acc += raw if stream and len(_lowerCAmelCase ) < chunk_len: lowercase__ : Optional[Any] = (_stride_left, 0) yield {"raw": acc[:chunk_len], "stride": stride, "partial": True} else: while len(_lowerCAmelCase ) >= chunk_len: # We are flushing the accumulator lowercase__ : Dict = (_stride_left, stride_right) lowercase__ : Optional[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride} if stream: lowercase__ : Tuple = False yield item lowercase__ : Union[str, Any] = stride_left lowercase__ : Tuple = acc[chunk_len - stride_left - stride_right :] # Last chunk if len(_lowerCAmelCase ) > stride_left: lowercase__ : Tuple = {'''raw''': acc, '''stride''': (_stride_left, 0)} if stream: lowercase__ : Dict = False yield item def a_ ( _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : Tuple = 2**24 # 16Mo try: with subprocess.Popen(_lowerCAmelCase , stdout=subprocess.PIPE , bufsize=_lowerCAmelCase ) as ffmpeg_process: while True: lowercase__ : Optional[int] = ffmpeg_process.stdout.read(_lowerCAmelCase ) if raw == b"": break yield raw except FileNotFoundError as error: raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
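# Added standalone sketch (not part of the original file): the overlapping
# windowing that chunk_bytes_iter above applies to the raw byte stream, shown
# on a tiny deterministic input.
def chunk_bytes(stream, chunk_len: int, stride_left: int, stride_right: int):
    acc = b""
    _stride_left = 0  # the very first chunk has no left context yet
    for raw in stream:
        acc += raw
        while len(acc) >= chunk_len:
            yield {"raw": acc[:chunk_len], "stride": (_stride_left, stride_right)}
            _stride_left = stride_left
            acc = acc[chunk_len - stride_left - stride_right :]
    if len(acc) > stride_left:  # flush the final partial chunk
        yield {"raw": acc, "stride": (_stride_left, 0)}


if __name__ == "__main__":
    chunks = list(chunk_bytes(iter([b"abcdefghij"]), chunk_len=4, stride_left=1, stride_right=1))
    print([c["raw"] for c in chunks])  # [b'abcd', b'cdef', b'efgh', b'ghij', b'ij']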
718
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=7 , a=3 , a=1_8 , a=3_0 , a=4_0_0 , a=True , a=None , a=True , a=None , a=True , ) -> List[str]: lowercase__ : Tuple = size if size is not None else {'shortest_edge': 2_0} lowercase__ : Union[str, Any] = crop_size if crop_size is not None else {'height': 1_8, 'width': 1_8} lowercase__ : Optional[int] = parent lowercase__ : Optional[int] = batch_size lowercase__ : str = num_channels lowercase__ : Any = image_size lowercase__ : Optional[Any] = min_resolution lowercase__ : int = max_resolution lowercase__ : List[Any] = do_resize lowercase__ : List[str] = size lowercase__ : str = do_center_crop lowercase__ : List[Any] = crop_size lowercase__ : Union[str, Any] = do_flip_channel_order def _UpperCAmelCase ( self ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Optional[Any] = MobileViTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Tuple = MobileViTImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> int: return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , 'do_resize' ) ) self.assertTrue(hasattr(a , 'size' ) ) self.assertTrue(hasattr(a , 'do_center_crop' ) ) self.assertTrue(hasattr(a , 'center_crop' ) ) self.assertTrue(hasattr(a , 'do_flip_channel_order' ) ) def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 2_0} ) self.assertEqual(image_processor.crop_size , {'height': 1_8, 'width': 1_8} ) lowercase__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'shortest_edge': 4_2} ) self.assertEqual(image_processor.crop_size , {'height': 8_4, 'width': 8_4} ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> str: # Initialize image_processing lowercase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : List[Any] = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 
self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Tuple: # Initialize image_processing lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Any = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def _UpperCAmelCase ( self ) -> Dict: # Initialize image_processing lowercase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input lowercase__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowercase__ : Tuple = image_processing(a , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
645
0
"""simple docstring""" _UpperCamelCase : Optional[Any] = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ _UpperCamelCase : str = [{"""type""": """code""", """content""": INSTALL_CONTENT}] _UpperCamelCase : Union[str, Any] = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
719
"""simple docstring""" import unittest import numpy as np from transformers import AlbertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.albert.modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, ) class UpperCAmelCase_ ( unittest.TestCase): def __init__( self , a , a=1_3 , a=7 , a=True , a=True , a=True , a=True , a=9_9 , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=1_6 , a=2 , a=0.02 , a=4 , ) -> Dict: lowercase__ : Optional[Any] = parent lowercase__ : Dict = batch_size lowercase__ : List[Any] = seq_length lowercase__ : int = is_training lowercase__ : str = use_attention_mask lowercase__ : Dict = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : Tuple = vocab_size lowercase__ : List[str] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : int = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Dict = hidden_dropout_prob lowercase__ : Tuple = attention_probs_dropout_prob lowercase__ : List[str] = max_position_embeddings lowercase__ : int = type_vocab_size lowercase__ : List[str] = type_sequence_label_size lowercase__ : Union[str, Any] = initializer_range lowercase__ : Optional[int] = num_choices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_attention_mask: lowercase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : List[str] = None if self.use_token_type_ids: lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : Any = AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _UpperCAmelCase ( self ) -> Any: lowercase__ : Optional[int] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( ( FlaxAlbertModel, FlaxAlbertForPreTraining, FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertForQuestionAnswering, ) if is_flax_available() else () ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Union[str, Any] = FlaxAlbertModelTester(self ) @slow def _UpperCAmelCase ( self ) -> str: for model_class_name in self.all_model_classes: lowercase__ : str = 
model_class_name.from_pretrained('albert-base-v2' ) lowercase__ : Tuple = model(np.ones((1, 1) ) ) self.assertIsNotNone(a ) @require_flax class UpperCAmelCase_ ( unittest.TestCase): @slow def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : str = FlaxAlbertModel.from_pretrained('albert-base-v2' ) lowercase__ : Optional[int] = np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) lowercase__ : Optional[Any] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) lowercase__ : Any = model(a , attention_mask=a )[0] lowercase__ : Tuple = (1, 1_1, 7_6_8) self.assertEqual(output.shape , a ) lowercase__ : Optional[Any] = np.array( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1e-4 ) )
645
0
"""simple docstring""" import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"): _UpperCamelCase : List[str] = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: _UpperCamelCase : Optional[Any] = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } def a_ ( _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : List[Any] = (images / 2 + 0.5).clamp(0 , 1 ) lowercase__ : List[Any] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowercase__ : Optional[int] = numpy_to_pil(__a ) return images def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' if images.ndim == 3: lowercase__ : str = images[None, ...] lowercase__ : List[Any] = (images * 255).round().astype('uint8' ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowercase__ : Union[str, Any] = [Image.fromarray(image.squeeze() , mode='L' ) for image in images] else: lowercase__ : List[str] = [Image.fromarray(__a ) for image in images] return pil_images
720
"""simple docstring""" from collections.abc import Sequence def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) ) def a_ ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ): '''simple docstring''' lowercase__ : int = 0.0 for coeff in reversed(_lowerCAmelCase ): lowercase__ : List[Any] = result * x + coeff return result if __name__ == "__main__": _UpperCamelCase : int = (0.0, 0.0, 5.0, 9.3, 7.0) _UpperCamelCase : Dict = 1_0.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
645
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_segformer import SegformerImageProcessor _UpperCamelCase : List[Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _snake_case): def __init__( self , *a , **a ) -> List[str]: warnings.warn( 'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use SegformerImageProcessor instead.' , snake_case_ , ) super().__init__(*snake_case_ , **snake_case_ )
721
"""simple docstring""" import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path _UpperCamelCase : Any = [ {"dataset": "wikipedia", "config_name": "20220301.de"}, {"dataset": "wikipedia", "config_name": "20220301.en"}, {"dataset": "wikipedia", "config_name": "20220301.fr"}, {"dataset": "wikipedia", "config_name": "20220301.frr"}, {"dataset": "wikipedia", "config_name": "20220301.it"}, {"dataset": "wikipedia", "config_name": "20220301.simple"}, {"dataset": "snli", "config_name": "plain_text"}, {"dataset": "eli5", "config_name": "LFQA_reddit"}, {"dataset": "wiki40b", "config_name": "en"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"}, {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"}, {"dataset": "natural_questions", "config_name": "default"}, ] def a_ ( _lowerCAmelCase : Optional[Any]=True ): '''simple docstring''' if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=_a)) class UpperCAmelCase_ ( _a): lowerCamelCase__ : str = None lowerCamelCase__ : Optional[Any] = None def _UpperCAmelCase ( self , a , a ) -> List[Any]: with TemporaryDirectory() as tmp_dir: lowercase__ : List[str] = dataset_module_factory(a , cache_dir=a ) lowercase__ : List[Any] = import_main_class(dataset_module.module_path , dataset=a ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=a , config_name=a , hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = '/'.join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=a ).replace(os.sep , '/' ), config.DATASET_INFO_FILENAME, ] ) lowercase__ : Union[str, Any] = cached_path(a , cache_dir=a ) self.assertTrue(os.path.exists(a ) ) @pytest.mark.integration def a_ ( _lowerCAmelCase : str ): '''simple docstring''' lowercase__ : Union[str, Any] = tmp_path_factory.mktemp('test_hf_gcp' ) / 'test_wikipedia_simple' lowercase__ : int = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : Optional[int] = import_main_class(dataset_module.module_path ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam lowercase__ : Optional[int] = None builder_instance.download_and_prepare() lowercase__ : Optional[int] = builder_instance.as_dataset() assert ds @pytest.mark.integration def a_ ( _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Optional[int] = dataset_module_factory('wikipedia' , cache_dir=_lowerCAmelCase ) lowercase__ : List[str] = import_main_class(dataset_module.module_path , dataset=_lowerCAmelCase ) lowercase__ : DatasetBuilder = builder_cls( cache_dir=_lowerCAmelCase , config_name='20220301.frr' , 
hash=dataset_module.hash , ) lowercase__ : Union[str, Any] = builder_instance.as_streaming_dataset() assert ds assert isinstance(_lowerCAmelCase , _lowerCAmelCase ) assert "train" in ds assert isinstance(ds['train'] , _lowerCAmelCase ) assert next(iter(ds['train'] ) )
645
0
"""simple docstring""" import argparse import copy def a_ ( _lowerCAmelCase : List[str] ): '''simple docstring''' lowercase__ : int = {} with open(_lowerCAmelCase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: lowercase__ : List[str] = [] _list.append([line.split()[1], line.split()[2]] ) lowercase__ : Tuple = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: lowercase__ : List[Any] = [] _list.append([line.split()[0], line.split()[2]] ) lowercase__ : Union[str, Any] = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] ): '''simple docstring''' with open(_lowerCAmelCase ) as f: lowercase__ : Optional[int] = f.read(1 ) lowercase__ : List[Any] = start_node lowercase__ : List[Any] = [] lowercase__ : str = start_node lowercase__ : str = 0 while visiting not in first_solution: lowercase__ : Optional[int] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(_lowerCAmelCase ) and k[0] not in first_solution: lowercase__ : List[Any] = k[1] lowercase__ : str = k[0] first_solution.append(_lowerCAmelCase ) lowercase__ : Any = distance_of_first_solution + int(_lowerCAmelCase ) lowercase__ : Optional[int] = best_node first_solution.append(_lowerCAmelCase ) lowercase__ : str = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 lowercase__ : str = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def a_ ( _lowerCAmelCase : str , _lowerCAmelCase : Any ): '''simple docstring''' lowercase__ : Tuple = [] for n in solution[1:-1]: lowercase__ : Dict = solution.index(_lowerCAmelCase ) for kn in solution[1:-1]: lowercase__ : Tuple = solution.index(_lowerCAmelCase ) if n == kn: continue lowercase__ : Union[str, Any] = copy.deepcopy(_lowerCAmelCase ) lowercase__ : Optional[int] = kn lowercase__ : List[Any] = n lowercase__ : List[Any] = 0 for k in _tmp[:-1]: lowercase__ : Optional[int] = _tmp[_tmp.index(_lowerCAmelCase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: lowercase__ : Optional[int] = distance + int(i[1] ) _tmp.append(_lowerCAmelCase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) lowercase__ : Union[str, Any] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda _lowerCAmelCase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : str = 1 lowercase__ : List[Any] = first_solution lowercase__ : Any = [] lowercase__ : str = distance_of_first_solution lowercase__ : str = solution while count <= iters: lowercase__ : Union[str, Any] = find_neighborhood(_lowerCAmelCase , _lowerCAmelCase ) lowercase__ : Dict = 0 lowercase__ : int = neighborhood[index_of_best_solution] lowercase__ : Optional[int] = len(_lowerCAmelCase ) - 1 lowercase__ : List[Any] = False while not found: lowercase__ : List[Any] = 0 while i < len(_lowerCAmelCase ): if best_solution[i] != solution[i]: lowercase__ : List[str] = best_solution[i] lowercase__ : Dict = solution[i] break lowercase__ : Any = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ 
second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) lowercase__ : str = True lowercase__ : int = best_solution[:-1] lowercase__ : Any = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: lowercase__ : Optional[int] = cost lowercase__ : str = solution else: lowercase__ : Optional[int] = index_of_best_solution + 1 lowercase__ : List[Any] = neighborhood[index_of_best_solution] if len(_lowerCAmelCase ) >= size: tabu_list.pop(0 ) lowercase__ : Optional[int] = count + 1 return best_solution_ever, best_cost def a_ ( _lowerCAmelCase : str=None ): '''simple docstring''' lowercase__ : List[str] = generate_neighbours(args.File ) lowercase__ : Optional[Any] = generate_first_solution( args.File , _lowerCAmelCase ) lowercase__ : int = tabu_search( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , args.Iterations , args.Size , ) print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" ) if __name__ == "__main__": _UpperCamelCase : List[str] = argparse.ArgumentParser(description="Tabu Search") parser.add_argument( "-f", "--File", type=str, help="Path to the file containing the data", required=True, ) parser.add_argument( "-i", "--Iterations", type=int, help="How many iterations the algorithm should perform", required=True, ) parser.add_argument( "-s", "--Size", type=int, help="Size of the tabu list", required=True ) # Pass the arguments to main method main(parser.parse_args())
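# Added standalone sketch (not part of the original file): find_neighborhood
# above enumerates all pairwise swaps of interior tour nodes; this shows only
# the move generator (the per-neighbor distance bookkeeping is omitted).
import copy


def two_swap_neighbors(tour):
    # tour[0] and tour[-1] are the fixed start/end node, so only swap the interior
    for i in range(1, len(tour) - 1):
        for j in range(i + 1, len(tour) - 1):
            neighbor = copy.deepcopy(tour)
            neighbor[i], neighbor[j] = neighbor[j], neighbor[i]
            yield neighbor


if __name__ == "__main__":
    print(list(two_swap_neighbors(["a", "b", "c", "d", "a"])))
    # [['a', 'c', 'b', 'd', 'a'], ['a', 'd', 'c', 'b', 'a'], ['a', 'b', 'd', 'c', 'a']]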
700
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def a_ ( _lowerCAmelCase : dict ): '''simple docstring''' return (data["data"], data["target"]) def a_ ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : np.ndarray ): '''simple docstring''' lowercase__ : Any = XGBRegressor(verbosity=0 , random_state=42 ) xgb.fit(_lowerCAmelCase , _lowerCAmelCase ) # Predict target for test data lowercase__ : str = xgb.predict(_lowerCAmelCase ) lowercase__ : Union[str, Any] = predictions.reshape(len(_lowerCAmelCase ) , 1 ) return predictions def a_ ( ): '''simple docstring''' lowercase__ : Optional[Any] = fetch_california_housing() lowercase__ , lowercase__ : str = data_handling(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = train_test_split( _lowerCAmelCase , _lowerCAmelCase , test_size=0.2_5 , random_state=1 ) lowercase__ : Any = xgboost(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Error printing print(f"""Mean Absolute Error : {mean_absolute_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) print(f"""Mean Square Error : {mean_squared_error(_lowerCAmelCase , _lowerCAmelCase )}""" ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
645
0
"""simple docstring""" import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class UpperCAmelCase_ ( __UpperCAmelCase): def _UpperCAmelCase ( self ) -> int: lowercase__ : Optional[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_lowerCamelCase , 'tf_padding' ) ) self.parent.assertTrue(hasattr(_lowerCamelCase , 'depth_multiplier' ) ) class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=3 , a=3_2 , a=0.25 , a=8 , a=8 , a=6 , a=3_2 , a=True , a=True , a=True , a="relu6" , a=1_2_8_0 , a=0.1 , a=0.02 , a=True , a=True , a=1_0 , a=None , ) -> Any: lowercase__ : Tuple = parent lowercase__ : int = batch_size lowercase__ : int = num_channels lowercase__ : Dict = image_size lowercase__ : Any = depth_multiplier lowercase__ : Any = depth_divisible_by lowercase__ : Optional[Any] = min_depth lowercase__ : Optional[Any] = expand_ratio lowercase__ : Optional[Any] = tf_padding lowercase__ : Any = output_stride lowercase__ : List[Any] = first_layer_is_expansion lowercase__ : Optional[Any] = finegrained_output lowercase__ : List[str] = hidden_act lowercase__ : int = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) lowercase__ : Any = classifier_dropout_prob lowercase__ : Dict = use_labels lowercase__ : Any = is_training lowercase__ : Any = num_labels lowercase__ : List[Any] = initializer_range lowercase__ : Optional[int] = scope def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None lowercase__ : List[str] = None if self.use_labels: lowercase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowercase__ : Any = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCAmelCase ( self ) -> Optional[int]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a , a ) -> Optional[int]: lowercase__ : Tuple = MobileNetVaModel(config=_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowercase__ : Any = model(_lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, 
self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def _UpperCAmelCase ( self , a , a , a , a ) -> int: lowercase__ : List[str] = self.num_labels lowercase__ : Optional[int] = MobileNetVaForImageClassification(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowercase__ : List[str] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self , a , a , a , a ) -> Union[str, Any]: lowercase__ : List[str] = self.num_labels lowercase__ : str = MobileNetVaForSemanticSegmentation(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() lowercase__ : Union[str, Any] = model(_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowercase__ : Optional[Any] = model(_lowerCamelCase , labels=_lowerCamelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : List[Any] = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs lowercase__ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase): lowerCamelCase__ : Union[str, Any] = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) lowerCamelCase__ : Tuple = ( { "feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification, "image-segmentation": MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) lowerCamelCase__ : Tuple = False lowerCamelCase__ : Dict = False lowerCamelCase__ : int = False lowerCamelCase__ : List[str] = False def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : int = MobileNetVaModelTester(self ) lowercase__ : List[str] = MobileNetVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: self.config_tester.run_common_tests() @unittest.skip(reason='MobileNetV2 does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Optional[Any]: pass @unittest.skip(reason='MobileNetV2 does not support input and output embeddings' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='MobileNetV2 does not output attentions' ) def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self ) -> str: lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Dict = model_class(_lowerCamelCase ) lowercase__ : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Union[str, Any] = [*signature.parameters.keys()] lowercase__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _lowerCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase ) def _UpperCAmelCase ( self ) -> 
Optional[int]: def check_hidden_states_output(a , a , a ): lowercase__ : Optional[int] = model_class(_lowerCamelCase ) model.to(_lowerCamelCase ) model.eval() with torch.no_grad(): lowercase__ : Tuple = model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) ) lowercase__ : Union[str, Any] = outputs.hidden_states lowercase__ : Dict = 1_6 self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase ) lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : str = True check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase ) @slow def _UpperCAmelCase ( self ) -> Dict: for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[Any] = MobileNetVaModel.from_pretrained(_lowerCamelCase ) self.assertIsNotNone(_lowerCamelCase ) def a_ ( ): '''simple docstring''' lowercase__ : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Union[str, Any]: return ( MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ : int = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(_lowerCamelCase ) lowercase__ : List[Any] = self.default_image_processor lowercase__ : Union[str, Any] = prepare_img() lowercase__ : Tuple = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowercase__ : List[Any] = model(**_lowerCamelCase ) # verify the logits lowercase__ : Optional[Any] = torch.Size((1, 1_0_0_1) ) self.assertEqual(outputs.logits.shape , _lowerCamelCase ) lowercase__ : str = torch.tensor([0.2_445, -1.1_993, 0.1_905] ).to(_lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Optional[Any] = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) lowercase__ : Optional[Any] = model.to(_lowerCamelCase ) lowercase__ : Any = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' ) lowercase__ : List[Any] = prepare_img() lowercase__ : List[Any] = image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase ) # forward pass with torch.no_grad(): lowercase__ : Optional[int] = model(**_lowerCamelCase ) lowercase__ : Optional[int] = outputs.logits # verify the logits lowercase__ : int = torch.Size((1, 2_1, 6_5, 6_5) ) self.assertEqual(logits.shape , _lowerCamelCase ) lowercase__ : Tuple = torch.tensor( [ [[17.5_790, 17.7_581, 18.3_355], [18.3_257, 18.4_230, 18.8_973], [18.6_169, 18.8_650, 19.2_187]], [[-2.1_595, 
-2.0_977, -2.3_741], [-2.4_226, -2.3_028, -2.6_835], [-2.7_819, -2.5_991, -2.7_706]], [[4.2_058, 4.8_317, 4.7_638], [4.4_136, 5.0_361, 4.9_383], [4.5_028, 4.9_644, 4.8_734]], ] , device=_lowerCamelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1e-4 ) )
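A quick arithmetic check on the segmentation assertion above: the expected torch.Size((1, 2_1, 6_5, 6_5)) is what DeepLabV3's effective output stride of 8 yields on a 513x513 input if, as in the TensorFlow original, 'same'-padded strided convolutions shrink spatial dims by ceiling division. A minimal sketch under that assumption (expected_map_size is a hypothetical helper, not part of the test suite):

import math

def expected_map_size(image_size: int, output_stride: int) -> int:
    # 'same' padding reduces a spatial dim to ceil(size / stride) per strided conv,
    # so a 513px side at an effective stride of 8 gives ceil(64.125) = 65
    return math.ceil(image_size / output_stride)

assert expected_map_size(513, 8) == 65  # matches the (1, 21, 65, 65) logits checked above
assert expected_map_size(224, 8) == 28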
701
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=1_0 , a=3 , a=2 , a=2 , a=2 , a=True , a=True , a=3_2 , a=5 , a=4 , a=3_7 , a="gelu" , a=0.1 , a=0.1 , a=1_0 , a=0.02 , a=0.9 , a=None , ) -> Optional[Any]: lowercase__ : str = parent lowercase__ : int = batch_size lowercase__ : Union[str, Any] = image_size lowercase__ : Optional[Any] = num_channels lowercase__ : Dict = patch_size lowercase__ : Tuple = tubelet_size lowercase__ : Optional[int] = num_frames lowercase__ : Optional[int] = is_training lowercase__ : int = use_labels lowercase__ : Optional[int] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : Optional[int] = num_attention_heads lowercase__ : Any = intermediate_size lowercase__ : str = hidden_act lowercase__ : List[Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Union[str, Any] = type_sequence_label_size lowercase__ : List[Any] = initializer_range lowercase__ : str = mask_ratio lowercase__ : Optional[Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase__ : Optional[Any] = (image_size // patch_size) ** 2 lowercase__ : str = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase__ : str = int(mask_ratio * self.seq_length ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : int = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase__ : int = None if self.use_labels: lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Dict = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Tuple: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : Dict = VideoMAEModel(config=a ) model.to(a ) model.eval() lowercase__ : Tuple = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( 
self , a , a , a ) -> Union[str, Any]: lowercase__ : str = VideoMAEForPreTraining(a ) model.to(a ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Any = torch.ones((self.num_masks,) ) lowercase__ : str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase__ : Optional[int] = mask.expand(self.batch_size , -1 ).bool() lowercase__ : str = model(a , a ) # model only returns predictions for masked patches lowercase__ : str = mask.sum().item() lowercase__ : int = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _UpperCAmelCase ( self ) -> str: lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs lowercase__ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Tuple = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowerCamelCase__ : Optional[int] = ( {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification} if is_torch_available() else {} ) lowerCamelCase__ : Any = False lowerCamelCase__ : Any = False lowerCamelCase__ : Union[str, Any] = False lowerCamelCase__ : str = False def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = VideoMAEModelTester(self ) lowercase__ : Optional[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=3_7 ) def _UpperCAmelCase ( self , a , a , a=False ) -> Optional[int]: lowercase__ : Union[str, Any] = copy.deepcopy(a ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase__ : Optional[Any] = torch.ones((self.model_tester.num_masks,) ) lowercase__ : Any = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase__ : Any = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase__ : Union[str, Any] = bool_masked_pos.to(a ) if return_labels: if model_class in [ *get_values(a ), ]: lowercase__ : Dict = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a ) return inputs_dict def _UpperCAmelCase ( self ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='VideoMAE does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Dict: pass def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) lowercase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : int = ['pixel_values'] 
self.assertListEqual(arg_names[:1] , a ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*a ) @slow def _UpperCAmelCase ( self ) -> str: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : List[Any] = VideoMAEModel.from_pretrained(a ) self.assertIsNotNone(a ) def _UpperCAmelCase ( self ) -> Optional[Any]: if not self.has_attentions: pass else: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = True for model_class in self.all_model_classes: lowercase__ : Union[str, Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Any = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase__ : Optional[Any] = True lowercase__ : int = False lowercase__ : Any = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[int] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Dict = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : str = True lowercase__ : List[str] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : List[str] = len(a ) # Check attention is always last and order is fine lowercase__ : Optional[int] = True lowercase__ : List[str] = True lowercase__ : int = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : List[str] = model(**self._prepare_for_class(a , a ) ) self.assertEqual(out_len + 1 , len(a ) ) lowercase__ : int = outputs.attentions self.assertEqual(len(a ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _UpperCAmelCase ( self ) -> Optional[int]: def check_hidden_states_output(a , a , a ): lowercase__ : Optional[int] = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) ) lowercase__ : Optional[int] = outputs.hidden_states lowercase__ : List[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(a ) , a ) lowercase__ : Optional[Any] = self.model_tester.seq_length - self.model_tester.num_masks lowercase__ : Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple = True check_hidden_states_output(a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Union[str, Any] = True 
check_hidden_states_output(a , a , a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> List[Any]: pass def a_ ( ): '''simple docstring''' lowercase__ : int = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) lowercase__ : str = np.load(_lowerCAmelCase ) return list(_lowerCAmelCase ) @require_torch @require_vision class UpperCAmelCase_ ( unittest.TestCase): @cached_property def _UpperCAmelCase ( self ) -> Optional[Any]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> int: lowercase__ : Dict = VideoMAEForVideoClassification.from_pretrained('MCG-NJU/videomae-base-finetuned-kinetics' ).to( a ) lowercase__ : str = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : int = image_processor(a , return_tensors='pt' ).to(a ) # forward pass with torch.no_grad(): lowercase__ : Union[str, Any] = model(**a ) # verify the logits lowercase__ : str = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , a ) lowercase__ : List[Any] = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> List[str]: lowercase__ : Optional[int] = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' ).to(a ) lowercase__ : Optional[Any] = self.default_image_processor lowercase__ : List[str] = prepare_video() lowercase__ : str = image_processor(a , return_tensors='pt' ).to(a ) # add boolean mask, indicating which patches to mask lowercase__ : Union[str, Any] = hf_hub_download(repo_id='hf-internal-testing/bool-masked-pos' , filename='bool_masked_pos.pt' ) lowercase__ : str = torch.load(a ) # forward pass with torch.no_grad(): lowercase__ : List[Any] = model(**a ) # verify the logits lowercase__ : Dict = torch.Size([1, 1_4_0_8, 1_5_3_6] ) lowercase__ : List[str] = torch.tensor( [[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] , device=a ) self.assertEqual(outputs.logits.shape , a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) lowercase__ : List[Any] = torch.tensor([0.5_142] , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase__ : Tuple = VideoMAEForPreTraining.from_pretrained('MCG-NJU/videomae-base-short' , norm_pix_loss=a ).to( a ) with torch.no_grad(): lowercase__ : Any = model(**a ) lowercase__ : List[Any] = torch.tensor(torch.tensor([0.6_469] ) , device=a ) self.assertTrue(torch.allclose(outputs.loss , a , atol=1e-4 ) )
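One detail the pre-training tests above rely on is that every video in a batch shares the same boolean mask, so a single 1-D mask is built once and expanded over the batch dimension. A self-contained sketch of that construction, with sizes picked purely for illustration:

import torch

seq_length, num_masks, batch_size = 1568, 1411, 2  # illustrative values, not the tester's
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()  # same mask for every example
assert bool_masked_pos.shape == (batch_size, seq_length)
assert int(bool_masked_pos[0].sum()) == num_masks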
645
0
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness _UpperCamelCase : int = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n' _UpperCamelCase : List[Any] = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n' _UpperCamelCase : str = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n' _UpperCamelCase : Tuple = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n' _UpperCamelCase : Dict = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class UpperCAmelCase_ ( datasets.Metric): def _UpperCAmelCase ( self ) -> Optional[int]: return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' ) ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , ) def _UpperCAmelCase ( self , a , a , a=[1, 1_0, 1_0_0] , a=4 , a=3.0 ) -> Optional[int]: if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError('This metric is currently not supported on Windows.' 
) with ThreadPoolExecutor(max_workers=__lowerCamelCase ) as executor: lowercase__ : Optional[Any] = [] lowercase__ : Any = Counter() lowercase__ : List[str] = 0 lowercase__ : str = defaultdict(__lowerCamelCase ) for task_id, (candidates, test_case) in enumerate(zip(__lowerCamelCase , __lowerCamelCase ) ): for candidate in candidates: lowercase__ : Union[str, Any] = candidate + '''\n''' + test_case lowercase__ : List[str] = (test_program, timeout, task_id, completion_id[task_id]) lowercase__ : List[Any] = executor.submit(__lowerCamelCase , *__lowerCamelCase ) futures.append(__lowerCamelCase ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(__lowerCamelCase ): lowercase__ : int = future.result() results[result["task_id"]].append((result['completion_id'], result) ) lowercase__ : List[str] = [], [] for result in results.values(): result.sort() lowercase__ : List[str] = [r[1]['''passed'''] for r in result] total.append(len(__lowerCamelCase ) ) correct.append(sum(__lowerCamelCase ) ) lowercase__ : List[str] = np.array(__lowerCamelCase ) lowercase__ : int = np.array(__lowerCamelCase ) lowercase__ : List[Any] = k lowercase__ : List[str] = {f"""pass@{k}""": estimate_pass_at_k(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def a_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[int] ): '''simple docstring''' def estimator(_lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ): lowercase__ : List[Any] = itertools.repeat(lowerCamelCase_ , len(lowerCamelCase_ ) ) else: assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ) lowercase__ : Optional[Any] = iter(lowerCamelCase_ ) return np.array([estimator(int(lowerCamelCase_ ) , int(lowerCamelCase_ ) , lowerCamelCase_ ) for n, c in zip(lowerCamelCase_ , lowerCamelCase_ )] )
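The estimator at the end of this sample computes pass@k in a numerically stable product form instead of with factorials; the two are equivalent, since pass@k = 1 - C(n-c, k)/C(n, k) = 1 - prod_{i=n-c+1..n} (1 - k/i) for n samples of which c passed. A small cross-check of that identity (pass_at_k_comb and pass_at_k_prod are names introduced here for the sketch):

from math import comb

import numpy as np

def pass_at_k_comb(n: int, c: int, k: int) -> float:
    # direct combinatorial definition of pass@k
    return 1.0 if n - c < k else 1.0 - comb(n - c, k) / comb(n, k)

def pass_at_k_prod(n: int, c: int, k: int) -> float:
    # the stable product form used by the metric above
    return 1.0 if n - c < k else 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

for n, c, k in [(10, 3, 1), (10, 3, 5), (200, 7, 100)]:
    assert abs(pass_at_k_comb(n, c, k) - pass_at_k_prod(n, c, k)) < 1e-9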
702
"""simple docstring""" import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() _UpperCamelCase : Dict = logging.get_logger(__name__) _UpperCamelCase : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "encoder.layer_norm_for_extract": "layer_norm_for_extract", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "label_embs_concat": "label_embeddings_concat", "mask_emb": "masked_spec_embed", "spk_proj": "speaker_proj", } _UpperCamelCase : List[str] = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "label_embeddings_concat", "speaker_proj", "layer_norm_for_extract", ] def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): '''simple docstring''' for attribute in key.split('.' ): lowercase__ : Dict = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: lowercase__ : Optional[int] = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: lowercase__ : Optional[int] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": lowercase__ : Optional[Any] = value elif weight_type == "weight_g": lowercase__ : Dict = value elif weight_type == "weight_v": lowercase__ : List[str] = value elif weight_type == "bias": lowercase__ : Optional[Any] = value else: lowercase__ : List[str] = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def a_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ): '''simple docstring''' lowercase__ : Tuple = [] lowercase__ : List[str] = fairseq_model.state_dict() lowercase__ : Union[str, Any] = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): lowercase__ : Optional[int] = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) lowercase__ : Optional[Any] = True else: for key, mapped_key in MAPPING.items(): lowercase__ : List[Any] = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key): # special case since naming is very similar continue lowercase__ : int = True if "*" in mapped_key: lowercase__ : Optional[int] = name.split(_lowerCAmelCase )[0].split('.' 
)[-2] lowercase__ : List[str] = mapped_key.replace('*' , _lowerCAmelCase ) if "weight_g" in name: lowercase__ : List[Any] = 'weight_g' elif "weight_v" in name: lowercase__ : int = 'weight_v' elif "bias" in name: lowercase__ : Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj lowercase__ : Union[str, Any] = 'weight' else: lowercase__ : int = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def a_ ( _lowerCAmelCase : List[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Dict ): '''simple docstring''' lowercase__ : int = full_name.split('conv_layers.' )[-1] lowercase__ : int = name.split('.' ) lowercase__ : int = int(items[0] ) lowercase__ : Dict = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) lowercase__ : Union[str, Any] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) lowercase__ : Optional[int] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) lowercase__ : List[Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) lowercase__ : int = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def a_ ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Tuple=True ): '''simple docstring''' if config_path is not None: lowercase__ : Any = UniSpeechSatConfig.from_pretrained(_lowerCAmelCase ) else: lowercase__ : Any = UniSpeechSatConfig() lowercase__ : Union[str, Any] = '' if is_finetuned: lowercase__ : Optional[Any] = UniSpeechSatForCTC(_lowerCAmelCase ) else: lowercase__ : List[Any] = UniSpeechSatForPreTraining(_lowerCAmelCase ) lowercase__ , lowercase__ , lowercase__ : int = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) lowercase__ : Union[str, Any] = model[0].eval() recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase ) hf_wavavec.save_pretrained(_lowerCAmelCase ) if 
__name__ == "__main__": _UpperCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) _UpperCamelCase : str = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
645
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _UpperCamelCase : str = { "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Optional[int] = ["LlamaTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : int = ["LlamaTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Any = [ "LlamaForCausalLM", "LlamaModel", "LlamaPreTrainedModel", "LlamaForSequenceClassification", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys _UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
703
"""simple docstring""" import collections import inspect import unittest from typing import Dict, List, Tuple from transformers import MaskFormerSwinConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device from transformers.utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import MaskFormerSwinBackbone from transformers.models.maskformer import MaskFormerSwinModel class UpperCAmelCase_ : def __init__( self , a , a=1_3 , a=3_2 , a=2 , a=3 , a=1_6 , a=[1, 2, 1] , a=[2, 2, 4] , a=2 , a=2.0 , a=True , a=0.0 , a=0.0 , a=0.1 , a="gelu" , a=False , a=True , a=0.02 , a=1e-5 , a=True , a=None , a=True , a=1_0 , a=8 , a=["stage1", "stage2", "stage3"] , a=[1, 2, 3] , ) -> int: lowercase__ : int = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : Dict = image_size lowercase__ : str = patch_size lowercase__ : Optional[Any] = num_channels lowercase__ : List[str] = embed_dim lowercase__ : Any = depths lowercase__ : Dict = num_heads lowercase__ : List[str] = window_size lowercase__ : int = mlp_ratio lowercase__ : Tuple = qkv_bias lowercase__ : Union[str, Any] = hidden_dropout_prob lowercase__ : str = attention_probs_dropout_prob lowercase__ : Tuple = drop_path_rate lowercase__ : List[str] = hidden_act lowercase__ : Optional[Any] = use_absolute_embeddings lowercase__ : Optional[Any] = patch_norm lowercase__ : Any = layer_norm_eps lowercase__ : List[Any] = initializer_range lowercase__ : List[str] = is_training lowercase__ : int = scope lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = type_sequence_label_size lowercase__ : List[str] = encoder_stride lowercase__ : Optional[Any] = out_features lowercase__ : Dict = out_indices def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__ : Optional[Any] = None if self.use_labels: lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : Tuple = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Union[str, Any]: return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _UpperCAmelCase ( self , a , a , a ) -> Dict: lowercase__ : Tuple = MaskFormerSwinModel(config=a ) model.to(a ) model.eval() lowercase__ : str = model(a ) lowercase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, expected_seq_len, expected_dim) ) def _UpperCAmelCase ( self , a , a , a ) -> Optional[int]: lowercase__ : List[Any] = MaskFormerSwinBackbone(config=a ) model.to(a ) model.eval() lowercase__ : int = model(a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] ) # verify ValueError with self.parent.assertRaises(a ): lowercase__ : Dict = ['stem'] lowercase__ : List[str] = MaskFormerSwinBackbone(config=a ) def _UpperCAmelCase ( self ) -> str: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ ( _a , _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = ( ( MaskFormerSwinModel, MaskFormerSwinBackbone, ) if is_torch_available() else () ) lowerCamelCase__ : List[str] = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {} lowerCamelCase__ : str = False lowerCamelCase__ : Dict = False lowerCamelCase__ : Any = False lowerCamelCase__ : Dict = False lowerCamelCase__ : int = False def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = MaskFormerSwinModelTester(self ) lowercase__ : Tuple = ConfigTester(self , config_class=a , embed_dim=3_7 ) @require_torch_multi_gpu @unittest.skip( reason=( '`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with' ' `nn.DataParallel`' ) ) def _UpperCAmelCase ( self ) -> Optional[int]: pass def _UpperCAmelCase ( self ) -> Tuple: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCAmelCase ( self ) -> str: return def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*a ) @unittest.skip('Swin does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: pass @unittest.skip('Swin does not support feedforward chunking' ) def _UpperCAmelCase ( self ) -> Tuple: pass def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[str] = model_class(a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(a , nn.Linear ) ) def _UpperCAmelCase ( self ) -> str: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) lowercase__ : Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => 
so arg_names order is deterministic lowercase__ : Optional[Any] = [*signature.parameters.keys()] lowercase__ : List[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a ) @unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' ) def _UpperCAmelCase ( self ) -> List[Any]: pass @unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' ) def _UpperCAmelCase ( self ) -> int: pass def _UpperCAmelCase ( self , a , a , a , a ) -> Tuple: lowercase__ : Dict = model_class(a ) model.to(a ) model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(a , a ) ) lowercase__ : List[Any] = outputs.hidden_states lowercase__ : str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(a ) , a ) # Swin has a different seq_length lowercase__ : Dict = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , a ) def _UpperCAmelCase ( self ) -> Optional[int]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : Union[str, Any] = 3 lowercase__ : str = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase__ : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase__ : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase__ : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: lowercase__ : List[str] = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : int = True self.check_hidden_states_output(a , a , a , (padded_height, padded_width) ) @unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' ) def _UpperCAmelCase ( self ) -> Optional[int]: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass @unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' ) def _UpperCAmelCase ( self ) -> Any: pass def _UpperCAmelCase ( self ) -> Any: lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(a ): lowercase__ : Union[str, Any] = 0 return t def check_equivalence(a , a , a , a={} ): 
with torch.no_grad(): lowercase__ : Optional[Any] = model(**a , return_dict=a , **a ) lowercase__ : Optional[int] = model(**a , return_dict=a , **a ).to_tuple() def recursive_check(a , a ): if isinstance(a , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a , a ): recursive_check(a , a ) elif isinstance(a , a ): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ): recursive_check(a , a ) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(a ) , set_nan_tensor_to_zero(a ) , atol=1e-5 ) , msg=( 'Tuple and dict output are not equal. Difference:' f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:""" f""" {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}. Dict has""" f""" `nan`: {torch.isnan(a ).any()} and `inf`: {torch.isinf(a )}.""" ) , ) recursive_check(a , a ) for model_class in self.all_model_classes: lowercase__ : Any = model_class(a ) model.to(a ) model.eval() lowercase__ : Tuple = self._prepare_for_class(a , a ) lowercase__ : Optional[Any] = self._prepare_for_class(a , a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : List[Any] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a ) lowercase__ : Any = self._prepare_for_class(a , a ) lowercase__ : int = self._prepare_for_class(a , a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) lowercase__ : Dict = self._prepare_for_class(a , a , return_labels=a ) lowercase__ : Optional[int] = self._prepare_for_class(a , a , return_labels=a ) check_equivalence(a , a , a , {'output_hidden_states': True} ) @require_torch class UpperCAmelCase_ ( unittest.TestCase , _a): lowerCamelCase__ : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else () lowerCamelCase__ : Optional[int] = MaskFormerSwinConfig def _UpperCAmelCase ( self ) -> Dict: lowercase__ : Optional[int] = MaskFormerSwinModelTester(self ) def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : int = inputs_dict['pixel_values'].shape[0] for backbone_class in self.all_model_classes: lowercase__ : Optional[Any] = backbone_class(a ) backbone.to(a ) backbone.eval() lowercase__ : Union[str, Any] = backbone(**a ) # Test default outputs and verify feature maps self.assertIsInstance(outputs.feature_maps , a ) self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) ) for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ): self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) ) self.assertIsNone(outputs.hidden_states ) self.assertIsNone(outputs.attentions ) # Test output_hidden_states=True lowercase__ : List[str] = backbone(**a , output_hidden_states=a ) self.assertIsNotNone(outputs.hidden_states ) self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) ) # We skip the stem layer for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ): for hidden_state in hidden_states: # Hidden states are in the format (batch_size, (height * width), n_channels) lowercase__ , lowercase__ , lowercase__ : int = hidden_state.shape self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) ) # Test output_attentions=True if self.has_attentions: lowercase__ : List[Any] = backbone(**a , output_attentions=a ) self.assertIsNotNone(outputs.attentions )
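The check_equivalence/recursive_check pair in the test above is a reusable pattern: outputs returned as a tuple and as a dict are walked in parallel, with NaNs zeroed on both sides so torch.allclose does not fail where both containers hold NaN. A condensed stand-alone version (nested_outputs_equal is a name invented for this sketch):

import torch

def nested_outputs_equal(tuple_obj, dict_obj, atol=1e-5) -> bool:
    if isinstance(tuple_obj, (list, tuple)):
        return all(nested_outputs_equal(t, d, atol) for t, d in zip(tuple_obj, dict_obj))
    if tuple_obj is None:
        return dict_obj is None
    # zero NaNs on both sides, like set_nan_tensor_to_zero above
    return torch.allclose(torch.nan_to_num(tuple_obj), torch.nan_to_num(dict_obj), atol=atol)

assert nested_outputs_equal((torch.ones(2), None), (torch.ones(2), None))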
645
0